Python建模与NLP大数据,机器学习,人工智能机器学习和人工智能入门

使用word2vec训练中文词向量

2018-01-27  本文已影响11324人  sudop

一、文本处理流程

通常我们文本处理流程如下:获取语料 → 数据预处理(清洗、去重)→ 分词并去除停用词 → 训练词向量 → 评估模型效果。

二、训练过程

数据预处理
分词
# Multi-threaded segmentation (optional; left disabled here)
# jieba.enable_parallel()
# Load a custom user dictionary so domain-specific terms are kept as single tokens
jieba.load_userdict("F:/baike_spider/dict/baike_word_chinese")
# Load the stop-word list
def getStopwords():
    """Load stop words from ``stop_words.txt`` in the current directory.

    Returns:
        list[str]: one entry per line of the file, stripped of surrounding
        whitespace (blank lines yield empty strings, as before).
    """
    with open("stop_words.txt", "r", encoding='utf8') as f:
        # Iterate the file directly instead of readlines() + append loop.
        return [line.strip() for line in f]
#分词
def segment():
    file_nums = 0
    count = 0
    url = base_url + 'processed_data/demo/'
    fileNames = os.listdir(url)
    for file in fileNames:
        logging.info('starting ' + str(file_nums) + 'file word Segmentation')
        segment_file = open(url + file + '_segment', 'a', encoding='utf8')
        with open(url + file, encoding='utf8') as f:
            text = f.readlines()
            for sentence in text:
                sentence = list(jieba.cut(sentence))
                sentence_segment = []
                for word in sentence:
                    if word not in stopwords:
                        sentence_segment.append(word)
                segment_file.write(" ".join(sentence_segment))
            del text
            f.close()
        segment_file.close()
        logging.info('finished ' + str(file_nums) + 'file word Segmentation')
        file_nums += 1
word2vec训练
import logging
import multiprocessing
import os.path
import sys

from gensim.models import Word2Vec
from gensim.models.word2vec import PathLineSentences

if __name__ == '__main__':
    # Configure logging with the script name as the logger name.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    # if len(sys.argv) < 4:
    #     print(globals()['__doc__'] % locals())
    #     sys.exit(1)
    # input_dir, outp1, outp2 = sys.argv[1:4]
    input_dir = '../baike/segment'      # directory of pre-segmented corpus files
    outp1 = 'model/baike.model'         # full gensim model (re-trainable)
    outp2 = 'model/word2vec_format'     # plain-text word2vec vector format
    # Create the output directory up front so model.save() cannot fail on a
    # missing path.
    os.makedirs(os.path.dirname(outp1), exist_ok=True)
    # Train on all files in input_dir: embedding size 256, window 10, drop
    # words seen fewer than 5 times, one worker per CPU core, 10 epochs.
    # NOTE(review): ``size``/``iter`` are the gensim 3.x parameter names;
    # gensim 4+ renamed them to ``vector_size``/``epochs`` — confirm the
    # installed gensim version before running.
    model = Word2Vec(PathLineSentences(input_dir),
                     size=256, window=10, min_count=5,
                     workers=multiprocessing.cpu_count(), iter=10)
    model.save(outp1)
    model.wv.save_word2vec_format(outp2, binary=False)

    # Run command (pass the training-file directory):
    # python word2vec_model.py data baike.model baike.vector
模型效果
上一篇 下一篇

猜你喜欢

热点阅读