
NER in Practice (3): BiLSTM+CRF

2019-10-13  弟弟们的哥哥
# embedding layer: character and word-segmentation information
embedding = self.embedding_layer(self.word_inputs, self.seg_inputs, config)
# LSTM input layer: dropout applied to the embeddings
lstm_inputs = tf.nn.dropout(embedding, self.dropout)
# LSTM output layer
lstm_outputs = self.biLSTM_layer(lstm_inputs, self.lstm_dim, self.lengths)
# projection layer
self.logits = self.project_layer(lstm_outputs)
# loss
self.loss = self.crf_loss_layer(self.logits, self.lengths)
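For reference, the tensors this graph consumes would be defined as placeholders in the model's __init__. The post doesn't show them; a minimal sketch, assuming integer ids with 0 reserved for padding and self.dropout holding the keep probability:

import tensorflow as tf

# character ids and segmentation-feature ids, [batch_size, num_steps]
self.word_inputs = tf.placeholder(tf.int32, [None, None], name="word_inputs")
self.seg_inputs = tf.placeholder(tf.int32, [None, None], name="seg_inputs")
# gold tag ids, [batch_size, num_steps]
self.targets = tf.placeholder(tf.int32, [None, None], name="targets")
# keep probability passed to tf.nn.dropout
self.dropout = tf.placeholder(tf.float32, name="dropout")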

tf.nn.embedding_lookup simply selects the rows of an embedding matrix that correspond to the given indices and returns those vectors.
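A toy example makes the lookup concrete (the matrix and ids here are made up for illustration):

import numpy as np
import tensorflow as tf

# a 4-word vocabulary with 3-dimensional embeddings
embedding_matrix = tf.constant(np.arange(12, dtype=np.float32).reshape(4, 3))
ids = tf.constant([[1, 3]])  # one sentence made of word ids 1 and 3
vectors = tf.nn.embedding_lookup(embedding_matrix, ids)

with tf.Session() as sess:
    print(sess.run(vectors))
    # [[[ 3.  4.  5.]
    #   [ 9. 10. 11.]]]   <- rows 1 and 3 of the matrix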


def embedding_layer(self, word_inputs, seg_inputs, config, name=None):
    """
    :param word_inputs: integer ids, one per character in the batch
    :param seg_inputs: word-segmentation features, one per character
    :param config: configuration dict
    :param name: variable-scope name for the layer
    :return: shape = [batch_size, num_steps, word_dim + seg_dim]
    """
    embedding = []
    with tf.variable_scope("word_embedding" if not name else name), tf.device('/cpu:0'):
        self.word_lookup = tf.get_variable(
            name="word_embedding",
            shape=[self.num_words, self.word_dim],
            initializer=self.initializer
        )
        embedding.append(tf.nn.embedding_lookup(self.word_lookup, word_inputs))

        if config['seg_dim']:
            with tf.variable_scope("seg_embedding"), tf.device('/cpu:0'):
                self.seg_lookup = tf.get_variable(
                    name="seg_embedding",
                    shape=[self.num_segs, self.seg_dim],
                    initializer=self.initializer
                )
                embedding.append(tf.nn.embedding_lookup(self.seg_lookup, seg_inputs))
        # concatenate character and segmentation embeddings along the last axis
        embed = tf.concat(embedding, axis=-1)
    return embed
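The post never shows how seg_inputs is produced. A common scheme for Chinese NER (an assumption here, including the helper name) tags each character with its position inside a jieba-segmented word, which would also make num_segs equal to 4:

import jieba

def get_seg_features(sentence):
    """0 = single-char word, 1 = word begin, 2 = word middle, 3 = word end."""
    seg_features = []
    for word in jieba.cut(sentence):
        if len(word) == 1:
            seg_features.append(0)
        else:
            tags = [2] * len(word)
            tags[0], tags[-1] = 1, 3
            seg_features.extend(tags)
    return seg_features

# get_seg_features("我爱北京天安门") might yield [0, 0, 1, 3, 1, 2, 3]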
def biLSTM_layer(self, lstm_inputs, lstm_dim, lengths, name=None):
    """
    :param lstm_inputs: [batch_size, num_steps, emb_size]
    :param lstm_dim: hidden size of each LSTM direction
    :param name: variable-scope name for the layer
    :return: [batch_size, num_steps, 2*lstm_dim]
    The output is 2*lstm_dim wide because the LSTM is bidirectional:
    each direction emits lstm_dim units.
    (rnn here is tf.contrib.rnn)
    """
    with tf.variable_scope("word_biLSTM" if not name else name):
        lstm_cell = {}
        for direction in ['forward', 'backward']:
            with tf.variable_scope(direction):
                lstm_cell[direction] = rnn.CoupledInputForgetGateLSTMCell(
                    lstm_dim,
                    use_peepholes=True,
                    initializer=self.initializer,
                    state_is_tuple=True
                )
        outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
            lstm_cell['forward'],
            lstm_cell['backward'],
            lstm_inputs,
            dtype=tf.float32,
            sequence_length=lengths
        )
    # outputs is a pair (output_fw, output_bw), each of shape
    # [batch_size, num_steps, lstm_dim]; concatenating on axis=2 stacks
    # the two directions along the feature dimension.
    return tf.concat(outputs, axis=2)
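Where `lengths` (the true, unpadded length of each sentence) comes from is also not shown. A minimal sketch, assuming padding positions in word_inputs carry id 0:

# 1 where a real character sits, 0 at padding; summing per row
# recovers each sentence's true length
used = tf.sign(tf.abs(self.word_inputs))
self.lengths = tf.cast(tf.reduce_sum(used, axis=1), tf.int32)  # [batch_size]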
def project_layer(self, lstm_outputs, name=None):
    """
    :param lstm_outputs: [batch_size, num_steps, lstm_dim * 2]
        num_steps is the number of characters per sentence,
        i.e. the (padded) sentence length of the batch
    :param name: variable-scope name for the layer
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope('project_layer' if not name else name):
        with tf.variable_scope('hidden_layer'):
            W = tf.get_variable(
                "W",
                shape=[self.lstm_dim * 2, self.lstm_dim],
                dtype=tf.float32,
                initializer=self.initializer
            )
            b = tf.get_variable(
                "b",
                shape=[self.lstm_dim],
                dtype=tf.float32,
                initializer=tf.zeros_initializer()
            )
            # flatten to one row per character, then project each
            # character back down to lstm_dim dimensions
            output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))

        with tf.variable_scope('logits'):
            W = tf.get_variable(
                "W",
                shape=[self.lstm_dim, self.num_tags],
                dtype=tf.float32,
                initializer=self.initializer
            )
            b = tf.get_variable(
                "b",
                shape=[self.num_tags],
                dtype=tf.float32,
                initializer=tf.zeros_initializer()
            )
            # finally project each character to num_tags values: the
            # unnormalized score of that character taking each tag
            pred = tf.nn.xw_plus_b(hidden, W, b)
    # restore the original shape: [batch_size, num_steps, num_tags]
    return tf.reshape(pred, [-1, self.num_steps, self.num_tags])
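The reshape at the end needs self.num_steps, and crf_loss_layer below needs self.batch_size. Their definitions aren't shown in the post; a common choice, assumed here, is to read both dynamically off the input batch so one graph handles variable-length batches:

# scalars read off the input batch at run time
self.batch_size = tf.shape(self.word_inputs)[0]
self.num_steps = tf.shape(self.word_inputs)[-1]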

Adding the CRF loss

def crf_loss_layer(self, project_logits, lengths, name=None):
    """
    :param project_logits: [batch_size, num_steps, num_tags]
    :param lengths: true sentence lengths, [batch_size]
    :param name: variable-scope name for the layer
    :return: scalar loss
    The start/pad bookkeeping below is reportedly the standard recipe.
    (crf_log_likelihood comes from tf.contrib.crf)
    """
    with tf.variable_scope('crf_loss' if not name else name):
        small_value = -10000.0
        # scores for an artificial "start" step: every real tag gets a
        # very negative score and only the extra (num_tags+1)-th tag is
        # allowed, so the last dimension is one longer than num_tags
        start_logits = tf.concat(
            [
                small_value * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
                tf.zeros(shape=[self.batch_size, 1, 1])
            ],
            axis=-1
        )

        pad_logits = tf.cast(
            small_value *
            tf.ones(shape=[self.batch_size, self.num_steps, 1]),
            dtype=tf.float32
        )

        # append one more column on the last dimension, so every
        # character now carries num_tags + 1 scores
        logits = tf.concat(
            [project_logits, pad_logits],
            axis=-1
        )
        # prepend the start step, as if every sentence in the batch
        # gained an extra character at position 0
        logits = tf.concat(
            [start_logits, logits],
            axis=1
        )
        # self.targets has shape [batch_size, num_steps]; prepending the
        # extra tag id num_tags to every sentence keeps targets aligned
        # with the start step added to logits above
        targets = tf.concat(
            [tf.cast(
                self.num_tags * tf.ones([self.batch_size, 1]),
                tf.int32
            ),
                self.targets
            ],
            axis=-1
        )
        # transition matrix between tags (including the extra start tag)
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer
        )

        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1  # +1 for the start step added above
        )
        return tf.reduce_mean(-log_likelihood)
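At inference time the same start/pad augmentation has to be applied before Viterbi decoding. A sketch of a matching decode step, assuming logits, lengths, and trans have already been fetched with sess.run (the method name decode is my own):

import numpy as np
from tensorflow.contrib.crf import viterbi_decode

def decode(self, logits, lengths, trans):
    """Return the best tag path for every sentence in the batch."""
    small_value = -10000.0
    start = np.asarray([[small_value] * self.num_tags + [0]])
    paths = []
    for score, length in zip(logits, lengths):
        score = score[:length]                          # drop padding steps
        pad = small_value * np.ones([length, 1])
        score = np.concatenate([score, pad], axis=1)    # extra tag column
        score = np.concatenate([start, score], axis=0)  # prepend start step
        path, _ = viterbi_decode(score, trans)
        paths.append(path[1:])                          # strip the start step
    return paths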

Evaluation with the F1 score

def evaluate(sess, model, name, manager, id_to_tag, logger):
    logger.info('evaluate:{}'.format(name))
    ner_results = model.evaluate(sess, manager, id_to_tag)
    eval_lines = model_utils.test_ner(ner_results, FLAGS.result_path)
    for line in eval_lines:
        logger.info(line)
    # overall F1 is the last field on the second line of the eval report
    f1 = float(eval_lines[1].strip().split()[-1])

    if name == "dev":
        best_test_f1 = model.best_dev_f1.eval()
        if f1 > best_test_f1:
            tf.assign(model.best_dev_f1, f1).eval()
            logger.info('new best dev f1 score:{:>.3f}'.format(f1))
        return f1 > best_test_f1
    elif name == "test":
        best_test_f1 = model.best_test_f1.eval()
        if f1 > best_test_f1:
            tf.assign(model.best_test_f1, f1).eval()
            logger.info('new best test f1 score:{:>.3f}'.format(f1))
        return f1 > best_test_f1

On tuning:

CRF++ was run first as a baseline. It is very fast, with accuracy around 0.8, recall around 0.87, and F1 a little over 0.87. A BiLSTM with a plain softmax output layer drives the loss down very quickly, which feels prone to local overfitting. With BiLSTM+CRF the loss shrinks steadily; by the time it reaches about 0.15 the accuracy barely changes any more, and the model converges faster than the CRF-less variant. Overall accuracy is also higher than without the CRF. Raising the number of training iterations improves accuracy a little as well.
