Computing Document Similarity with gensim

gensim official site: https://radimrehurek.com/gensim/tutorial.html

We train four kinds of models (tfidf, lsi, lda, doc2vec) to vectorize the documents.

The input file has two tab-separated columns: title \t tokens.
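
For illustration, two made-up input lines (here \t marks a literal tab character; the second column is the space-separated output of a word segmenter):

机器学习入门 \t 机器 学习 入门
深度学习简介 \t 深度 学习 简介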

do_train_model.py: train the models

#! /usr/bin/env python
#encoding: utf-8

import sys
import os
import re
import logging
import time
from six import iteritems
from gensim import corpora, models, similarities
from gensim.models.doc2vec import LabeledSentence

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
        filename='1.log',
        filemode='w',
        level=logging.INFO)

g_charset='gbk'
g_pattern = re.compile(' +')

class MyCorpus(object):
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        for line in open(self.fname): # format: title \t tokens
            s = line.rstrip('\n').split('\t')
            yield g_pattern.split(s[1].decode(g_charset))

class MyLabelCorpus(object):
    def __init__(self, fname):
        self.fname = fname
    def __iter__(self):
        for i,line in enumerate(open(self.fname)):
            s = line.rstrip('\n').split('\t')
            yield LabeledSentence(words=s[1].decode(g_charset, 'ignore').split(), tags=[i])

def train_tfidf(corpus, dictionary, model_file, vec_file):
    '''
    train tfidf model
    '''
    tfidf = models.TfidfModel(corpus)
    tfidf.save(model_file)
    corpus_tfidf = tfidf[corpus]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_tfidf) # load back with corpora.SvmLightCorpus(vec_file)

def train_lsi(corpus, dictionary, model_file, vec_file):
    '''
    train lsi model
    '''
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=100)
    lsi.save(model_file)
    corpus_new = lsi[corpus_tfidf]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_new)

def train_lda(corpus, dictionary, model_file, vec_file):
    '''
    train lda model
    '''
    #tfidf = models.TfidfModel(corpus)
    #corpus_tfidf = tfidf[corpus]

    # the single-process models.LdaModel also works here
    lda = models.LdaMulticore(corpus, id2word=dictionary, num_topics=100, chunksize=2000,
            passes=50, iterations=50, eval_every=None, workers=8)
    lda.save(model_file)
    corpus_new = lda[corpus]
    corpora.SvmLightCorpus.serialize(vec_file, corpus_new)

def save_svmlight_format(docvecs, outfile):
    '''
    write dense doc2vec vectors in svmlight format (feature ids are 1-based)
    '''
    with open(outfile, 'w') as fout:
        for vec in docvecs:
            a = ["%d:%.6f" % (i + 1, v) for i, v in enumerate(vec)]
            fout.write("0 %s\n" % " ".join(a))

def train_doc2vec(infile, model_file, vec_file):
    '''
    train doc2vec model
    '''
    corp = MyLabelCorpus(infile)
    model = models.Doc2Vec(corp, size=100, window=5, min_count=3, workers=12,
            hs=1, negative=0, dbow_words=1, iter=40)
    model.save(model_file)
    save_svmlight_format(model.docvecs, vec_file)

def read_stop_file(stop_file):
    stoplist = []
    if os.path.isfile(stop_file):
        with open(stop_file) as f:
            stoplist = [w.strip().decode(g_charset, 'ignore') for w in f.readlines()]
    return stoplist

def read_corpus(infile):
    '''
    read corpus file and filter words
    '''
    corp = MyCorpus(infile)
    dictionary = corpora.Dictionary(corp)

    stop_file = 'stopwords.txt'
    stoplist = read_stop_file(stop_file)

    stop_ids = [dictionary.token2id[stopword] for stopword in stoplist \
            if stopword in dictionary.token2id]
    once_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs) if docfreq <= 1]
    print "stop_ids: ",len(stop_ids)
    print "once_ids: ",len(once_ids)
    dictionary.filter_tokens(stop_ids + once_ids)
    dictionary.compactify()

    print "uniq tokens:", len(dictionary)

    corpus = [dictionary.doc2bow(text) for text in corp]
    return corpus, dictionary

def train_model(infile, tag):
    '''
    train the chosen model to vectorize the documents
    '''
    valid_tags = set(["tfidf", 'lsi', 'lda', 'doc2vec'])
    if tag not in valid_tags:
        print "wrong tag: %s" % tag
        return

    ts = time.time()
    prefix = "%s.%s" % (infile, tag)
    model_file = prefix + ".model"
    vec_file = prefix + ".vec"

    if tag == 'doc2vec':
        train_doc2vec(infile, model_file, vec_file)
    else:
        corpus, dictionary = read_corpus(infile)

        if tag == 'tfidf':
            train_tfidf(corpus, dictionary, model_file, vec_file)
        elif tag == 'lsi':
            train_lsi(corpus, dictionary, model_file, vec_file)
        elif tag == 'lda':
            train_lda(corpus, dictionary, model_file, vec_file)

    ts2 = time.time()
    cost = int(ts2-ts)
    print "cost_time:\t%s\t%s\t%d" % (infile, tag, cost)

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print "Usage: %s <infile> <tag>" % __file__
        print "\t train different model to vecterize the document"
        print "<tag>: tfidf, lsi, lda, doc2vec"
        sys.exit(-1)

    infile = sys.argv[1]
    tag = sys.argv[2]
    train_model(infile, tag)
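
A typical run (corpus.txt is a placeholder filename):

python do_train_model.py corpus.txt lsi

This writes corpus.txt.lsi.model (the trained model) and corpus.txt.lsi.vec (one vector per document, in svmlight format), which do_query_simi.py below reads.

Note that train_model does not persist the dictionary, so unseen documents cannot be vectorized after training. Below is a minimal sketch of folding a new document into the tfidf space, assuming we additionally call dictionary.save(model_file + '.dict') inside train_model (the paths are hypothetical):

#! /usr/bin/env python
#encoding: utf-8
from gensim import corpora, models

# hypothetical paths; assumes train_model was extended to save the dictionary
dictionary = corpora.Dictionary.load('corpus.txt.tfidf.model.dict')
tfidf = models.TfidfModel.load('corpus.txt.tfidf.model')

# tokenize the new document the same way as the training corpus
bow = dictionary.doc2bow(u'机器 学习 入门'.split())
print tfidf[bow] # sparse tf-idf vector, e.g. [(3, 0.71), (17, 0.70)]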

do_query_simi.py: query for similar documents

#! /usr/bin/env python
#encoding: utf-8

import sys
import os
import logging
from gensim import corpora, models, similarities

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
        filename='1.log',
        filemode='w',
        level=logging.INFO)

def read_doc_file(infile):
    '''
    read doc file
    format: title \t tokens
    '''
    docs = []
    for line in open(infile):
        s = line.rstrip('\n').split('\t')
        docs.append(s[0])
    return docs

def get_feature_num(corpus_semantic):
    '''
    return the feature dimension (max feature id + 1) of a sparse corpus
    '''
    max_index = -1
    for v in corpus_semantic:
        if not v:
            continue # skip empty documents, max() would fail on them
        max_cur = max(t[0] for t in v)
        if max_cur > max_index:
            max_index = max_cur
    return max_index + 1

def query_simi(infile, tag):
    '''
    query similar documents based on trained document vectors
    '''
    valid_tags = set(["tfidf", 'lsi', 'lda', 'doc2vec'])
    if tag not in valid_tags:
        print "wrong tag: %s" % tag
        return

    prefix = "%s.%s" % (infile, tag)
    vec_file = prefix + ".vec"
    index_file = vec_file + ".index"

    # load document vectors and determine the feature dimension
    corpus_semantic = corpora.SvmLightCorpus(vec_file)
    n = get_feature_num(corpus_semantic)
    print "feature num:", n

    # build the similarity index, or load a cached copy from disk
    if os.path.isfile(index_file):
        if tag == 'tfidf':
            index = similarities.SparseMatrixSimilarity.load(index_file)
        else:
            index = similarities.MatrixSimilarity.load(index_file)
    else:
        if tag == 'tfidf':
            index = similarities.SparseMatrixSimilarity(corpus_semantic, num_features = n)
        else:
            index = similarities.MatrixSimilarity(corpus_semantic)
        index.save(index_file)

    # map each document title to its line number (doc id)
    docs = read_doc_file(infile)
    doc_map = {}
    for i, doc in enumerate(docs):
        doc_map[doc] = i

    # query loop; load all vectors into memory for random access by doc id
    topN = 10
    corpus_semantic = list(corpus_semantic)
    while True:
        query = raw_input("\ninput query: ")
        if query == 'q' or query == 'quit':
            break
        query = query.strip()
        q = doc_map.get(query, -1)
        if q == -1:
            print "doc not found: %s" % query
            continue
        print "doc_id: %d" % q
        #print "query_doc: %s" % docs[q]
        sims = index[corpus_semantic[q]]
        sims = sorted(enumerate(sims), key=lambda item: -item[1])
        for k, v in sims[:topN]:
            print "%.3f\t%s" % (v, docs[k])

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print "Usage: %s <infile> <tag>" % __file__
        print "<tag>: tfidf, lsi, lda, doc2vec"
        sys.exit(-1)

    infile = sys.argv[1]
    tag = sys.argv[2]
    query_simi(infile, tag)
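
A sample session (the filename and query are placeholders; the query must exactly match a title from the input file, and q or quit exits):

python do_query_simi.py corpus.txt lsi

input query: 机器学习入门
(the 10 most similar titles are printed with their cosine similarity scores)

The first run builds the similarity index and saves it to corpus.txt.lsi.vec.index; subsequent runs load the cached index from disk.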