Problems with transformers when loading pretrained models

Problem 1: urllib.error.URLError: urlopen error [SSL: CERTIFICATE_VERIFY_FAILED]

This is caused by SSL certificate verification. At the top of the .py file, import the ssl package and replace the default HTTPS context with an unverified one:

import ssl
ssl._create_default_https_context = ssl._create_unverified_context
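
For context, a minimal sketch (the model name is just an example): the two ssl lines must run before any code that downloads files over HTTPS, such as a from_pretrained call.

import ssl
ssl._create_default_https_context = ssl._create_unverified_context

# Only after the workaround is in place, trigger the download
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")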

Problem 2: ImportError: cannot import name 'AutoModelWithHeads' from 'transformers'

This happens because the pip-installed transformers version is too old; versions 4.0 and above replaced some of the old interfaces.
Solution:

1. Delete the hub folder inside the hidden .cache directory under your home directory
2. Uninstall the old version: pip uninstall transformers
3. Install the latest version directly: pip install transformers (mine defaulted to 4.16.2)
4. Verify it in a Python program with the following code:
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
model = AutoModel.from_pretrained("bert-base-chinese")

# Example input: encode a sentence into a tensor of token ids
token_tensor = tokenizer("欢迎使用transformers", return_tensors="pt")["input_ids"]

# To get the hidden-layer output as a plain tensor, pass return_dict=False so the model returns a tuple
encoder_layers, _ = model(token_tensor, return_dict=False)
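
Equivalently, a small sketch (not from the original post): with the default return_dict=True, the same hidden states can be read by attribute name instead of tuple unpacking.

outputs = model(token_tensor)                 # return_dict=True is the default in 4.x
encoder_layers = outputs.last_hidden_state    # same tensor as the first element of the tuple above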

Problem 3: torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-chinese') raises an error

This call is only supported by older transformers versions; once transformers is above 4.0 it can no longer be used.
Solution 1 - keep using the old version:

1. Uninstall the latest transformers: pip uninstall transformers
2. Clone the huggingface transformers repository, install it from source, pin the version used in this tutorial (2.3.0), and go to the directory that holds the fine-tuning script (a quick version check follows these commands):

# Clone the huggingface transformers repository
git clone https://github.com/huggingface/transformers.git

# Enter the transformers folder
cd transformers

# Install the transformers package for Python, because the fine-tuning script is a .py file
pip install .

# The cloned version may not match the one used in this tutorial, so also run:
pip install transformers==2.3.0

# Enter the path containing the fine-tuning script and list its contents
cd examples/torch/
ls
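
# Optional sanity check (not in the original post): confirm the pinned version took effect
python -c "import transformers; print(transformers.__version__)"   # expect 2.3.0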
3. Open the glue.py file and replace its contents with the following code:
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler

try:
    from torch.utils.tensorboard import SummaryWriter
except:
    from tensorboardX import SummaryWriter

from tqdm import tqdm, trange

from transformers import (WEIGHTS_NAME, BertConfig,
                          BertForSequenceClassification, BertTokenizer,
                          RobertaConfig,
                          RobertaForSequenceClassification,
                          RobertaTokenizer,
                          XLMConfig, XLMForSequenceClassification,
                          XLMTokenizer, XLNetConfig,
                          XLNetForSequenceClassification,
                          XLNetTokenizer,
                          DistilBertConfig,
                          DistilBertForSequenceClassification,
                          DistilBertTokenizer,
                          AlbertConfig,
                          AlbertForSequenceClassification,
                          AlbertTokenizer,)
from transformers import AdamW, get_linear_schedule_with_warmup

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features

logger = logging.getLogger(__name__)

ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
                                                                                RobertaConfig, DistilBertConfig)), ())

MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer)
}


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids':      batch[0],
                      'attention_mask': batch[1],
                      'labels':         batch[3]}
            if args.model_type != 'distilbert':
                inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None  # XLM, DistilBERT and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step


def evaluate(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # multi-gpu eval
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {'input_ids':      batch[0],
                          'attention_mask': batch[1],
                          'labels':         batch[3]}
                if args.model_type != 'distilbert':
                    inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None  # XLM, DistilBERT and RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)

        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    return results


def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(examples,
                                                tokenizer,
                                                label_list=label_list,
                                                max_length=args.max_seq_length,
                                                output_mode=output_mode,
                                                pad_on_left=bool(args.model_type in ['xlnet']),  # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='',
                        help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='',
                        help="For distant debugging.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    print(processors)
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          finetuning_task=args.task_name,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)  ####
    tokenizer.add_special_tokens({"pad_token": "[PAD]"})  ####
    model = model_class.from_pretrained(args.model_name_or_path,
                                        from_tf=bool('.ckpt' in args.model_name_or_path),
                                        config=config,
                                        cache_dir=args.cache_dir if args.cache_dir else None)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""

            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()
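
For reference, scripts like this are launched from the command line. A hypothetical invocation (task name, data path, output path, and hyperparameters are placeholders, not from the original post; all flags come from the argparse definitions above) could look like:

python glue.py \
  --model_type bert \
  --model_name_or_path bert-base-chinese \
  --task_name sst-2 \
  --do_train \
  --do_eval \
  --data_dir ./data/SST-2 \
  --max_seq_length 128 \
  --per_gpu_train_batch_size 8 \
  --learning_rate 2e-5 \
  --num_train_epochs 3.0 \
  --output_dir ./output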

At this point, the following two lines no longer raise an error:

model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-chinese')
tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-chinese')
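
Once both calls succeed, a short usage sketch (not from the original post; it assumes the pre-4.0 tuple-style API that Solution 1 installs):

import torch

text = "欢迎使用transformers"                          # hypothetical example sentence
indexed_tokens = tokenizer.encode(text, add_special_tokens=True)
tokens_tensor = torch.tensor([indexed_tokens])

model.eval()
with torch.no_grad():
    encoder_layers, _ = model(tokens_tensor)           # 2.x models return a tuple (sequence_output, pooled_output)
print(encoder_layers.shape)                            # e.g. torch.Size([1, seq_len, 768]) for a base-size BERT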

Solution 2 - use the new version: see Problem 2 above.


This article is reposted from: https://blog.csdn.net/weixin_37935970/article/details/123238677
Copyright belongs to the original author 程序媛赵可乐. In case of infringement, please contact us for removal.
