I. Text clustering is usually done over an existing collection of historical data, with common methods such as k-means and DBSCAN. When the requirement is to cluster streaming text (that is, to assign each document to a cluster as it arrives), those methods no longer apply. There are of course many other methods for dynamically clustering streaming data, and dynamic clustering has its own open challenges: the number of clusters is not fixed, and the similarity threshold is hard to set; both deserve further research. This post implements a simple single-pass clustering method. Similarity between texts is measured with the cosine distance, and texts can be represented as tfidf vectors (the idf statistics can be estimated once on a large document collection and then applied directly to the words of each new text), or with Chinese pre-trained models such as word2vec or BERT.
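As a small illustration of that idf idea, the sketch below fits the idf statistics on a historical corpus with scikit-learn and reuses them to vectorize a newly arriving text; history_docs and new_doc are made-up placeholders, not data from this project.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical historical corpus (tokens already space-separated).
history_docs = ['stock market rises sharply', 'team wins the final', 'stock market falls back']

# Estimate idf once on the historical collection.
vectorizer = TfidfVectorizer()
history_vecs = vectorizer.fit_transform(history_docs)

# Vectorize a newly arriving text with the precomputed idf (no refitting).
new_vec = vectorizer.transform(['stock market rebounds'])

# Cosine similarity between the new text and each historical document.
print(cosine_similarity(new_vec, history_vecs))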

II. The program

import collections
import os
import pickle

import numpy as np
from gensim import corpora, models, matutils
from gensim.models import Doc2Vec
from sklearn.metrics.pairwise import cosine_similarity

from utils.tokenizer import load_samples, load_data, read_data_to_list

'''
Overall flow:
input: doc vectors; threshold
output: clusters
begin
    input doc vectors
    input threshold
    the first doc becomes the first cluster, and its vector becomes the cluster center
    while (doc vectors) {
        while (clusters) {
            max_sim, max_cluster = similarity(doc vector, cluster);
        }
        if (max_sim > threshold) {
            max_cluster.put(doc vector);
            max_cluster.update_center();
        }
        else {
            build new cluster(doc vector);
        }
    }
end
'''
class SingelPassCluster(object):

    '''
    1. Cosine similarity over tfidf vectors
    '''
    def tfidf_vec(self, corpus, pivot=10, slope=0.25):
        dictionary = corpora.Dictionary(corpus)  # build the token -> id mapping
        self.dict_size = len(dictionary)
        print('dictionary size:{}'.format(len(dictionary)))
        corpus = [dictionary.doc2bow(text) for text in corpus]  # bag-of-words representation of each doc
        tfidf = models.TfidfModel(corpus, pivot=pivot, slope=slope)
        corpus_tfidf = tfidf[corpus]
        return corpus_tfidf

    def get_max_similarity(self, cluster_cores, vector):
        max_value = 0
        max_index = -1
        print('vector:{}'.format(vector))
        for k, core in cluster_cores.items():
            print('core:{}'.format(core))
            similarity = matutils.cossim(vector, core)
            if similarity > max_value:
                max_value = similarity
                max_index = k
        return max_index, max_value

    def single_pass(self, corpus_vec, corpus, theta):
        clusters = {}
        cluster_cores = {}
        cluster_text = {}
        num_topic = 0
        cnt = 0
        for vector, text in zip(corpus_vec, corpus):
            if num_topic == 0:
                # the first document starts the first cluster and serves as its center
                clusters.setdefault(num_topic, []).append(vector)
                cluster_cores[num_topic] = vector
                cluster_text.setdefault(num_topic, []).append(text)
                num_topic += 1
            else:
                max_index, max_value = self.get_max_similarity(cluster_cores, vector)
                if max_value > theta:
                    clusters[max_index].append(vector)
                    text_matrix = matutils.corpus2dense(clusters[max_index], num_terms=self.dict_size,
                                                        num_docs=len(clusters[max_index])).T  # sparse -> dense
                    core = np.mean(text_matrix, axis=0)  # update the cluster center
                    core = matutils.any2sparse(core)  # convert the dense center back to a sparse vector
                    cluster_cores[max_index] = core
                    cluster_text[max_index].append(text)
                else:  # start a new cluster
                    clusters.setdefault(num_topic, []).append(vector)
                    cluster_cores[num_topic] = vector
                    cluster_text.setdefault(num_topic, []).append(text)
                    num_topic += 1
            cnt += 1
            if cnt % 100 == 0:
                print('processing {}...'.format(cnt))
        return clusters, cluster_text

    def fit_transform(self, corpus, raw_data, theta=0.5):
        tfidf_vec = self.tfidf_vec(corpus)  # tfidf_vec holds sparse vectors
        clusters, cluster_text = self.single_pass(tfidf_vec, raw_data, theta)
        return clusters, cluster_text

    '''
    2. Cosine similarity over doc2vec vectors
    '''
    def fit(self, doc2vec_model, corpus, raw_data, theta=0.5):
        doc_vec = self.doc_vec(doc2vec_model, corpus)
        clusters, cluster_text = self.doc2vec_single_pass(doc_vec, raw_data, theta)
        return clusters, cluster_text

    def fit_2(self, doc_vec, text2index, theta):
        clusters, cluster_text = self.doc2vec_single_pass(doc_vec, text2index, theta)
        return clusters, cluster_text

    def doc_vec(self, doc2vec_model, x_train):
        print('doc2vec infered vec...')
        infered_vectors_list = []
        for text, label in x_train:
            vector = doc2vec_model.infer_vector(text)
            infered_vectors_list.append(vector)
            print('infered vector size:{}'.format(len(infered_vectors_list)))
            if len(infered_vectors_list) >= 100:
                break
        return infered_vectors_list

    def get_doc2vec_similarity(self, cluster_cores, vector):
        max_value = 0
        max_index = -1
        for k, core in cluster_cores.items():  # core -> np.ndarray
            similarity = cosine_similarity(vector.reshape(1, -1), core.reshape(1, -1))
            similarity = similarity[0, 0]
            if similarity > max_value:
                max_value = similarity
                max_index = k
        return max_index, max_value

    def doc2vec_single_pass(self, corpus_vec, corpus, theta):
        clusters = {}
        cluster_cores = {}
        cluster_text = {}
        num_topic = 0
        cnt = 0
        for vector, text in zip(corpus_vec, corpus):
            if num_topic == 0:
                clusters.setdefault(num_topic, []).append(vector)
                cluster_cores[num_topic] = vector
                cluster_text.setdefault(num_topic, []).append(text)
                num_topic += 1
            else:
                max_index, max_value = self.get_doc2vec_similarity(cluster_cores, vector)
                if max_value > theta:
                    clusters[max_index].append(vector)
                    core = np.mean(clusters[max_index], axis=0)  # update the cluster center
                    cluster_cores[max_index] = core
                    cluster_text[max_index].append(text)
                else:  # start a new cluster
                    clusters.setdefault(num_topic, []).append(vector)
                    cluster_cores[num_topic] = vector
                    cluster_text.setdefault(num_topic, []).append(text)
                    num_topic += 1
            cnt += 1
            if cnt % 100 == 0:
                print('processing {}...'.format(cnt))
        return clusters, cluster_text


def sim(doc_vec):
    vector = doc_vec[0]
    print('vector:{}'.format(type(vector)))
    for core in doc_vec:
        similarity = cosine_similarity(vector.reshape(1, -1), core.reshape(1, -1))
        similarity = similarity[0, 0]
        print("similarity:{}".format(similarity))


if __name__ == '__main__':
    base_path = os.path.abspath(os.path.join(os.getcwd(), '../..'))
    process_text = base_path + '/data/process_text.txt'  # path to the preprocessed samples
    doc2vec_path = base_path + '/data/doc2vec.pkl'
    cluster_result = base_path + '/data/cluster_result.txt'
    doc_vec_path = base_path + '/data/doc_vec.vec'  # document vectors inferred by doc2vec

    corpus = load_data(process_text)
    raw_text = load_samples(process_text)

    index2corpus = collections.OrderedDict()
    for index, line in enumerate(raw_text):
        index2corpus[index] = line
    text2index = list(index2corpus.keys())
    print('docs total size:{}'.format(len(text2index)))

    single_cluster = SingelPassCluster()

    cal_vec_type = 'doc2vec'

    if cal_vec_type == 'tfidf':
        clusters, cluster_text = single_cluster.fit_transform(corpus, text2index, theta=0.4)

    if cal_vec_type == 'doc2vec':
        with open(doc_vec_path, 'rb') as file:
            infered_vectors_list = pickle.load(file)
        clusters, cluster_text = single_cluster.fit_2(infered_vectors_list, text2index, theta=0.6)
        '''
        if os.path.exists(doc2vec_path):
            print('doc2vec model loading...')
            doc2vec_model = Doc2Vec.load(doc2vec_path)
            x_train = read_data_to_list(process_text)
            clusters, cluster_text = single_cluster.fit(doc2vec_model, x_train, text2index, theta=0.6)
        '''

    if cal_vec_type == 'd2vsim':
        if os.path.exists(doc2vec_path):
            print('doc2vec model loading...')
            doc2vec_model = Doc2Vec.load(doc2vec_path)
            x_train = read_data_to_list(process_text)
            doc_vec = single_cluster.doc_vec(doc2vec_model, x_train)
            sim(doc_vec)

    print("............................................................................................")
    print("Number of clusters obtained: {} ...".format(len(clusters)))
    print("............................................................................................\n")

    # sort clusters by the number of documents they contain, in descending order
    clusterTopic_list = sorted(cluster_text.items(), key=lambda x: len(x[1]), reverse=True)
    with open(cluster_result, 'w', encoding='utf-8') as file_write:
        for k in clusterTopic_list:
            doc_lines = []
            for index, value in enumerate(k[1], start=1):
                doc_lines.append('(' + str(index) + '): ' + index2corpus[value])
            doc_lines = '\n'.join(doc_lines)
            file_write.write("[cluster index]:{} \n[cluster size]:{} \n[cluster documents]:\n{}".format(k[0], len(k[1]), doc_lines))
            file_write.write('\n')
            file_write.flush()
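The script depends on project-local helpers (load_data, load_samples, read_data_to_list from utils.tokenizer), so it is not runnable as-is outside the repository. A minimal self-contained run of the tfidf path might look like the sketch below, with a made-up tokenized toy corpus:

# Hedged, self-contained toy run of the tfidf path (hypothetical data).
toy_corpus = [
    ['stock', 'market', 'rises'],
    ['stock', 'market', 'falls'],
    ['team', 'wins', 'final'],
]
toy_cluster = SingelPassCluster()
# Document indices stand in for the raw texts, as in the main block above.
clusters, cluster_text = toy_cluster.fit_transform(toy_corpus, list(range(len(toy_corpus))), theta=0.4)
print(cluster_text)  # the two 'stock market' sentences should share a cluster if their similarity exceeds theta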
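The doc2vec branch loads a pre-trained model from doc2vec.pkl, but the training step is not shown. A sketch of how such a model could be trained with gensim follows; the corpus and hyperparameters are illustrative assumptions, not the author's actual settings:

from gensim.models.doc2vec import Doc2Vec, TaggedDocument

# Hypothetical tokenized corpus; in this project it would come from process_text.txt.
tokenized_docs = [['stock', 'market', 'rises'], ['team', 'wins', 'final']]
tagged = [TaggedDocument(words=doc, tags=[i]) for i, doc in enumerate(tokenized_docs)]

# Illustrative hyperparameters.
model = Doc2Vec(tagged, vector_size=100, window=5, min_count=1, epochs=20)
model.save('doc2vec.pkl')  # later loaded via Doc2Vec.load(...)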
