
5 Tricks to Improve Model Training for NER

2024-11-12 Source: 个人技术集锦


1. The cMedQANER Dataset

A dataset for named entity recognition in the medical domain.

Entity labels: {'physiology', 'test', 'disease', 'time', 'drug', 'symptom', 'body', 'department', 'crowd', 'feature', 'treatment'}

The training data is annotated in BIO format, one character per line:

便 B_disease
秘 I_disease
两 O
个 O
多 O
月 O
不 O
清 O
楚 O

2. Model Selection

We use a BERT + BiLSTM + CRF architecture.

2.1 Data Cleaning

import re
import pandas as pd

def load_data(data_path, max_len):
    """Load the data.
    One sample: [(segment1, label1), (segment2, label2), (segment3, label3), ...]
    """
    datasets = []
    samples_len = []

    X = []
    y = []
    sentence = []
    labels = []
    split_pattern = re.compile(r'[;;。,、?!\.\?,! ]')
    with open(data_path, 'r', encoding='utf8') as f:
        for line in f.readlines():
            # Each line holds one character and its tag, separated by a tab or space
            # sentence = [w1, w2, w3, ..., wn], labels = [B-xx, I-xx, ..., O]
            line = line.strip().split()
            if (not line or len(line) < 2):
                # blank line: end of the current sentence
                X.append(sentence)
                y.append(labels)
                sentence = []
                labels = []
                continue
            word, tag = line[0], line[1].replace('_', '-').replace('M', 'I').replace('E', 'I').replace('S', 'B')  # BMES -> BIO
            if split_pattern.match(word) and len(sentence) + 8 >= max_len:
                # cut the sequence at punctuation once it gets close to max_len
                sentence.append(word)
                labels.append(tag)
                X.append(sentence)
                y.append(labels)
                sentence = []
                labels = []
            else:
                sentence.append(word)
                labels.append(tag)
    if len(sentence):
        X.append(sentence)
        sentence = []
        y.append(labels)
        labels = []

    for token_seq, label_seq in zip(X, y):
        # target format: sample_seq = [['XXXX', 'disease'], ['asaa', 'drug'], ...]
        if len(token_seq) < 2:
            continue
        sample_seq, last_flag = [], ''
        for token, this_flag in zip(token_seq, label_seq):
            if this_flag == 'O' and last_flag == 'O':
                sample_seq[-1][0] += token
            elif this_flag == 'O' and last_flag != 'O':
                sample_seq.append([token, 'O'])
            elif this_flag[:1] == 'B':
                sample_seq.append([token, this_flag[2:]])  # e.g. B-disease -> disease
            else:
                if sample_seq:
                    sample_seq[-1][0] += token
            last_flag = this_flag

        datasets.append(sample_seq)
        samples_len.append(len(token_seq))
        if len(token_seq) > 200:
            print(token_seq)

    df = pd.DataFrame(samples_len)
    print(data_path, '\n', df.describe())
    print(sorted(set([i for arr in y for i in arr])))
    # datasets holds the cleaned training samples
    return datasets, y
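
A minimal usage sketch of the loader; the file path and max_len are placeholder assumptions, and the expected output shown corresponds to the BIO example above:

# Hypothetical path and max_len; adjust to your local copy of cMedQANER.
train_data, train_labels = load_data('cMedQANER/train.txt', max_len=256)
# For the BIO example shown earlier, the cleaned sample looks like:
# [['便秘', 'disease'], ['两个多月不清楚', 'O']]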

2.2 Building the Model

from bert4keras.models import build_transformer_model
from bert4keras.layers import ConditionalRandomField
from bert4keras.backend import keras, K
from bert4keras.optimizers import Adam

def bert_bilstm_crf(config_path, checkpoint_path, num_labels, lstm_units, drop_rate, learning_rate):
    bert = build_transformer_model(
            config_path=config_path,
            checkpoint_path=checkpoint_path,
            model='bert',
            return_keras_model=False
        )
    x = bert.model.output  # [batch_size, seq_length, 768]
    lstm = keras.layers.Bidirectional(
            keras.layers.LSTM(
                lstm_units,
                kernel_initializer='he_normal',
                return_sequences=True
            )
        )(x)  # [batch_size, seq_length, lstm_units * 2]

    x = keras.layers.concatenate(
            [lstm, x],
            axis=-1
        )  # [batch_size, seq_length, lstm_units * 2 + 768]

    x = keras.layers.TimeDistributed(
            keras.layers.Dropout(drop_rate)
        )(x)  # [batch_size, seq_length, lstm_units * 2 + 768]

    x = keras.layers.TimeDistributed(
            keras.layers.Dense(
                    num_labels,
                    activation='relu',
                    kernel_initializer='he_normal',
                )
        )(x)  # [batch_size, seq_length, num_labels]

    crf = ConditionalRandomField()
    output = crf(x)

    model = keras.models.Model(bert.input, output)
    model.summary()
    model.compile(
            loss=crf.sparse_loss,
            optimizer=Adam(learning_rate),
            metrics=[crf.sparse_accuracy]
        )

    return model, crf
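
A hedged usage sketch for building the model; the pretrained-model paths and hyperparameter values are illustrative assumptions, and num_labels is derived from the 11 cMedQANER entity types in BIO tagging:

# Illustrative paths and hyperparameters; adjust to your environment.
config_path = 'chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'chinese_L-12_H-768_A-12/bert_model.ckpt'
num_labels = 2 * 11 + 1  # 11 entity types as B-/I- pairs, plus 'O'
model, CRF = bert_bilstm_crf(config_path, checkpoint_path, num_labels,
                             lstm_units=128, drop_rate=0.3, learning_rate=1e-5)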

2.3 Callback for Saving the Best Model

    checkpoint = keras.callbacks.ModelCheckpoint(
        checkpoint_save_path, 
        monitor='val_sparse_accuracy', 
        verbose=1, 
        save_best_only=True,
        mode='max'
        )
    model.fit(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        validation_data=valid_generator.forfit(),
        validation_steps=len(valid_generator),
        epochs=epochs,
        callbacks=[checkpoint]
    )

Note that the best model is saved here according to token-level label accuracy on the validation data (labels such as B-disease, I-drug), whereas our evaluation metric, F1, is entity-based rather than label-based. This mismatch between the training monitoring metric and the evaluation metric leaves room for further optimization; the fix is described in the tricks below.
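
To make the mismatch concrete, here is a small made-up example (not taken from the dataset):

# High label accuracy can coexist with zero entity-level F1.
gold = ['B-disease', 'I-disease', 'O', 'O']
pred = ['B-disease', 'O',         'O', 'O']
# 3 of 4 labels match -> label accuracy = 0.75,
# but the gold entity spanning positions 0-1 is not recovered -> entity-level F1 = 0.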

2.4 CRF Decoder

Decode the label sequence predicted by the model into entities.

from bert4keras.snippets import ViterbiDecoder, to_array

class NamedEntityRecognizer(ViterbiDecoder):
    """Named entity recognizer.
    """
    def recognize(self, text):
        # text: a raw string to be tokenized
        tokens = tokenizer.tokenize(text)
        while len(tokens) > max_len:  # drop the second-to-last token so that [SEP] stays at the end
            tokens.pop(-2)
        # rematch: maps the tokens back to character offsets in the original text
        mapping = tokenizer.rematch(text, tokens)
        token_ids = tokenizer.tokens_to_ids(tokens)
        segment_ids = [0] * len(token_ids)
        token_ids, segment_ids = to_array([token_ids], [segment_ids])  # ndarray
        nodes = model.predict([token_ids, segment_ids])[0]  # [seq_len, num_labels] (43 in the example below)
        labels = self.decode(nodes)  # label ids, shape [seq_len,], e.g. [0 0 0 0 0 7 8 8 0 0 0 0 0 0 0]
        entities, starting = [], False
        """
        Worked example (Tianchi address dataset):
         test_data[1:2][0]:
         [   ['浙江省', 'prov'],
             ['杭州市', 'city'],
             ['余杭', 'district'],
             ['乔司街道', 'town'],
             ['博卡路', 'road'],
             ['0号', 'roadno'],
             ['博卡制衣', 'poi']]
         text: '浙江省杭州市余杭乔司街道博卡路0号博卡制衣'
         tokens: ['[CLS]','浙','江','省','杭','州','市','余','杭','乔','司','街','道','博','卡','路','0','号','博','卡','制','衣','[SEP]']

         nodes: (23, 43)
         labels: (23,) [ 0 27 28 28  5  6  6 15 16 39 40 40 40 31 32 32 33 34 25 26 26 26  0]
         entities:
         [[[1, 2, 3], 'prov'],
         [[4, 5, 6], 'city'],
         [[7, 8], 'district'],
         [[9, 10, 11, 12], 'town'],
         [[13, 14, 15], 'road'],
         [[16, 17], 'roadno'],
         [[18, 19, 20, 21], 'poi']]

         mapping: [[],[0],[1],[2],[3],[4],[5],[6],[7],[8],[9],[10],[11],[12],[13],[14],[15],[16],[17],[18],[19],[20],[]]
        """
        for i, label in enumerate(labels):
            if label > 0:
                # odd label ids correspond to B- tags, i.e. the start of an entity
                if label % 2 == 1:
                    starting = True
                    entities.append([[i], id2label[(label - 1) // 2]])
                elif starting:
                    entities[-1][0].append(i)
                else:
                    starting = False
            else:
                starting = False
        """
        return: [('浙江省', 'prov'),
                 ('杭州市', 'city'),
                 ('余杭', 'district'),
                 ('乔司街道', 'town'),
                 ('博卡路', 'road'),
                 ('0号', 'roadno'),
                 ('博卡制衣', 'poi')]
        """
        return [(text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1], l) for w, l in entities]

Above I used an address recognition dataset from a Tianchi competition to annotate the decoding process in detail; the same process applies to the medical dataset we use here. Note that the return value is no longer a sequence of individual labels but entities assembled from those labels, e.g. ('浙江省', 'prov').
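
A minimal instantiation sketch; it assumes the `model` and `CRF` returned by `bert_bilstm_crf`, plus the `tokenizer`, `max_len`, and `id2label` globals used above, and the printed result is illustrative:

# The decoder uses the learned CRF transition matrix for Viterbi decoding.
NER = NamedEntityRecognizer(trans=K.eval(CRF.trans), starts=[0], ends=[0])
print(NER.recognize('便秘两个多月不清楚'))
# e.g. [('便秘', 'disease')]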

3. Five Tricks for Model Optimization

3.1 Aligning the Training Monitoring Metric with the Evaluation Metric

The goal is to also use entity-level F1 when saving the best model, so that model selection is consistent with evaluation.

from tqdm import tqdm

# Assume `data` is the validation set
def ner_metrics(data):
    X, Y, Z = 1e-6, 1e-6, 1e-6
    for d in tqdm(data):
        text = ''.join([i[0] for i in d])
        pred = NER.recognize(text)   # NER is the CRF-decoder instance defined above
        R = set(pred)
        T = set([tuple(i) for i in d if i[1] != 'O'])
        X += len(R & T)   # correctly predicted entities
        Y += len(R)       # predicted entities
        Z += len(T)       # gold entities
    f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
    return f1, precision, recall

# Evaluation callback

class Evaluator(keras.callbacks.Callback):
    def __init__(self):
        super(Evaluator, self).__init__()
        self.best_val_f1 = 0

    def on_epoch_end(self, epoch, logs=None):
        # the CRF transition matrix changes every epoch, so refresh it in the decoder
        NER.trans = K.eval(CRF.trans)
        f1, precision, recall = ner_metrics(valid_data)
        if f1 > self.best_val_f1:
            model.save_weights(checkpoint_save_path)
            self.best_val_f1 = f1
            print('save model to {}'.format(checkpoint_save_path))
        else:
            # shrink the learning rate by 5x when entity-level F1 stops improving
            global learning_rate
            learning_rate = learning_rate / 5
            K.set_value(self.model.optimizer.lr, learning_rate)
        print(
            'valid:  f1: %.5f, precision: %.5f, recall: %.5f, best f1: %.5f\n' %
            (f1, precision, recall, self.best_val_f1)
        )

# The label-accuracy ModelCheckpoint from 2.3 is no longer needed:
# the Evaluator saves the best weights according to entity-level F1.
evaluator = Evaluator()   # instantiate the callback (metric switched to entity-level F1)
model.fit(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        validation_data=valid_generator.forfit(),
        validation_steps=len(valid_generator),
        epochs=epochs,
        callbacks=[evaluator]    # use the evaluator callback
    )

3.2 Learning Rate Decay
Reference: blog post

def scheduler(epoch):
    # epochs 0 and 1 keep the base rate; from epoch 2 on, lr = learning_rate / (2 * (epoch - 1)),
    # e.g. with learning_rate = 1e-5: epoch 2 -> 5e-6, epoch 3 -> 2.5e-6, epoch 4 -> ~1.7e-6
    return learning_rate / (max(2 * (epoch - 1), 1))
lr_scheduler = keras.callbacks.LearningRateScheduler(scheduler)
model.fit(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        validation_data=valid_generator.forfit(),
        validation_steps=len(valid_generator),
        epochs=epochs,
        callbacks=[evaluator,lr_scheduler]
    )

3.3 Layer-wise Learning Rates

Because we are fine-tuning a BERT model, the BiLSTM and Dense layers stacked on top of it start from random initialization and may not train well within a limited number of epochs. On top of that, the learning rate of 1e-5 is very small. The approach is therefore to set learning rates per layer, with a larger rate for the non-BERT layers.

class SetLearningRate:
    """
    A layer wrapper that sets the (relative) learning rate of the wrapped layer.
    """
    def __init__(self, layer, lamb, is_ada=False):
        self.layer = layer
        self.lamb = lamb  # learning rate multiplier
        self.is_ada = is_ada  # whether the optimizer is adaptive (Adam-like)

    def __call__(self, inputs):
        with K.name_scope(self.layer.name):
            if not self.layer.built:
                input_shape = K.int_shape(inputs)
                self.layer.build(input_shape)
                self.layer.built = True
                if self.layer._initial_weights is not None:
                    self.layer.set_weights(self.layer._initial_weights)
        for key in ['kernel', 'bias', 'embeddings', 'depthwise_kernel', 'pointwise_kernel', 'recurrent_kernel', 'gamma', 'beta']:
            if hasattr(self.layer, key):
                weight = getattr(self.layer, key)
                if self.is_ada:
                    lamb = self.lamb  # adaptive optimizers: use lamb directly
                else:
                    lamb = self.lamb**0.5  # SGD (including momentum): take the square root of lamb
                K.set_value(weight, K.eval(weight) / lamb)  # rescale the initialization
                setattr(self.layer, key, weight * lamb)  # replace the weight with a scaled version
        return self.layer(inputs)

# Model with layer-wise learning rates
def bert_bilstm_crf(config_path, checkpoint_path, num_labels, lstm_units, drop_rate, learning_rate):
    bert = build_transformer_model(
            config_path=config_path,
            checkpoint_path=checkpoint_path,
            model='bert',
            return_keras_model=False
        )
    x = bert.model.output  # [batch_size, seq_length, 768]
    lstm = SetLearningRate(
            keras.layers.Bidirectional(
                keras.layers.LSTM(
                    lstm_units,
                    kernel_initializer='he_normal',
                    return_sequences=True
                )
            ),
            500,   # 500x the BERT learning rate
            True
        )(x)  # [batch_size, seq_length, lstm_units * 2]

    x = keras.layers.concatenate(
            [lstm, x],
            axis=-1
        )  # [batch_size, seq_length, lstm_units * 2 + 768]

    x = keras.layers.TimeDistributed(
            keras.layers.Dropout(drop_rate)
        )(x)  # [batch_size, seq_length, lstm_units * 2 + 768]

    x = SetLearningRate(
            keras.layers.TimeDistributed(
                keras.layers.Dense(
                        num_labels,
                        activation='relu',
                        kernel_initializer='he_normal',
                    )
            ),
            500,
            True
        )(x)  # [batch_size, seq_length, num_labels]

    crf = ConditionalRandomField(lr_multiplier=500)
    output = crf(x)

    model = keras.models.Model(bert.input, output)
    model.summary()
    model.compile(
            loss=crf.sparse_loss,
            optimizer=Adam(learning_rate),
            metrics=[crf.sparse_accuracy]
        )

    return model, crf

3.4 Adversarial Training for Robustness

from bert4keras.backend import search_layer
import numpy as np

def adversarial_training(model, embedding_name, epsilon=1):
    """
    Add adversarial training to a keras model.
    `model` is the keras model to augment; the perturbation targets BERT's embedding layer.
    """
    if model.train_function is None:  # if the train function has not been built yet
        model._make_train_function()  # build it manually
    old_train_function = model.train_function  # keep a reference to the original train function

    # locate the embedding layer
    for output in model.outputs:
        embedding_layer = search_layer(output, embedding_name)
        if embedding_layer is not None:
            break
    if embedding_layer is None:
        raise Exception('Embedding layer not found')

    # gradient of the loss w.r.t. the embeddings
    embeddings = embedding_layer.embeddings  # embedding matrix
    gradients = K.gradients(model.total_loss, [embeddings])  # embedding gradients
    gradients = K.zeros_like(embeddings) + gradients[0]  # convert to a dense tensor

    # wrap as a function
    inputs = (
        model._feed_inputs + model._feed_targets + model._feed_sample_weights
    )  # all input tensors
    embedding_gradients = K.function(
        inputs=inputs,
        outputs=[gradients],
        name='embedding_gradients',
    )  # function that returns the embedding gradients

    def train_function(inputs):
        # redefined train step
        grads = embedding_gradients(inputs)[0]  # embedding gradients
        delta = epsilon * grads / (np.sqrt((grads**2).sum()) + 1e-8)  # compute the perturbation
        K.set_value(embeddings, K.eval(embeddings) + delta)  # inject the perturbation
        outputs = old_train_function(inputs)  # gradient descent on the perturbed embeddings
        K.set_value(embeddings, K.eval(embeddings) - delta)  # remove the perturbation
        return outputs
    model.train_function = train_function  # override the original train function
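
A brief usage sketch, assuming the model built above; 'Embedding-Token' is the name bert4keras gives to BERT's token embedding layer, and epsilon=0.5 is an illustrative value:

# Call once after compile() and before fit().
adversarial_training(model, 'Embedding-Token', 0.5)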

3.5 Finer-grained Hyperparameter Tuning

For example, tuning the learning rate, batch_size, the lamb multiplier in SetLearningRate, and so on.
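
A hypothetical sketch of a coarse grid search over these hyperparameters; the value ranges are illustrative, and the training/evaluation loop is elided:

import itertools

# Illustrative search space; `lamb` would have to be passed into bert_bilstm_crf
# instead of the hard-coded 500 used in 3.3.
for lr, batch_size, lamb in itertools.product([1e-5, 2e-5, 5e-5], [16, 32], [100, 500, 1000]):
    print('trying', lr, batch_size, lamb)
    # rebuild the generators with batch_size, rebuild the model with lr and lamb,
    # train with the Evaluator callback, and keep the configuration with the best entity F1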

4. Results with All the Tricks Applied

To be added later; I currently cannot access the server.
