import re
import pandas as pd
import csv
import jieba
import collections
import random
import math
import wordcloud as wc
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np

def cos_sim(ver1, ver2):
    """Return the cosine similarity of two feature vectors.

    Only the first ``len(ver1) - 1`` entries are compared, because each
    vector built in this script carries the raw danmu text in its final
    slot (see the vector-construction loop below).

    Returns 0.0 when either vector has zero magnitude instead of raising
    ZeroDivisionError (a danmu containing no feature words produces an
    all-zero vector).
    """
    dot = 0.0
    sq1 = 0.0
    sq2 = 0.0
    for i in range(len(ver1) - 1):  # skip the trailing text element
        dot += ver1[i] * ver2[i]
        # BUG FIX: original used `^` (bitwise XOR), not exponentiation.
        sq1 += ver1[i] ** 2
        sq2 += ver2[i] ** 2
    if sq1 == 0 or sq2 == 0:
        return 0.0
    return dot / (math.sqrt(sq1) * math.sqrt(sq2))

# Silence jieba's default INFO logging during dictionary initialization.
jieba.setLogLevel(jieba.logging.INFO)

stopwords='停用词库.txt' #stopword list file (jieba's stopword list)
top=50 #top-50 word frequencies; NOTE(review): unused — the word-cloud code hardcodes max_words=50
background='词云背景.jpg'#word-cloud background image; NOTE(review): unused — the drawing code opens '词云背景图.jpeg' instead; confirm which file is intended
Output1='高词频结果.txt' #output file for high-frequency words
Output2='低词频结果.txt'#output file for low-frequency words
loaddict='用户词典.txt'#user-defined dictionary for jieba
danmu_seperate=[] #token list after segmentation
danmu_clear=[]  #token list after stopword removal
#Load the stopword list and the danmu text
with open(stopwords,'r',encoding='UTF-8') as s:
    Stopwords=set(s.read().split('\n'))
Stopwords.add(' ')
Stopwords.add(" ")  # NOTE(review): appears identical to the previous add — possibly meant to be a full-width space; verify
with open('danmuku.csv','r',encoding='UTF-8') as f:
    col1=csv.DictReader(f)
    col1=[row['content'] for row in col1]  # keep only the 'content' column
#####
#Words jieba should keep together (not split) during segmentation
jieba.suggest_freq('徐大sao', True)
jieba.suggest_freq('郭乐乐', True)
jieba.suggest_freq('恰饭',True)
jieba.suggest_freq('味道',True)
jieba.suggest_freq('户部巷',True)
# Punctuation/whitespace noise to strip from the danmu text.
pattern=re.compile(u'\t|\n|\.|-|:|;|\)|\(|\?|"')
# BUG FIX: load the user dictionary BEFORE segmenting — originally it was
# loaded after jieba.lcut, so it had no effect on the main segmentation.
jieba.load_userdict(loaddict)
# Join all danmu into one string ('。' as separator) and clean it.
sen = '。'.join(col1)
sen = re.sub(pattern, '', sen)
# Segment in precise (non-full) mode.
danmu_seperate = jieba.lcut(sen, cut_all=False)
# Drop stopwords and words whose length is outside 2..5 characters.
danmu_clear = [word for word in danmu_seperate
               if word not in Stopwords and 6 > len(word) > 1]
##Word-frequency statistics over the cleaned token list.
count=collections.Counter(danmu_clear)
#High-frequency words: the top 1% of distinct words by frequency.
leng=len(count.keys())//100 #leng = size of the feature-word set
top_=count.most_common(leng)#top-1% words form the feature-word set
print("高频词\n")
# `with` guarantees the output file is closed even on error (was a bare open/close).
with open(Output1,'w',encoding='UTF-8') as fileOut:
    for t,f in top_:                       # word and its frequency
        print(t + '\t',str(f) + '\n')                    # echo each row
        fileOut.write(t + '\t' + str(f)+'\n') # one word per line in the output file
#Low-frequency words: fewer than 6 occurrences.
least=[ k for k,v in count.items() if v<6]
print("低频词\n")
with open(Output2,'w',encoding='UTF-8') as fileOut2:
    for l in least:
        print(l +'\t',str(count[l])+'\n')
        fileOut2.write(l +'\t'+ str(count[l])+'\n')
dic_new=dict(top_)
dicli=dic_new.keys()#feature-word set (dict view keeps most_common order)
vector_=[]#one vector per qualifying danmu
for w in col1:
    w1=re.sub(pattern,'',w)
    # Only danmu longer than 4 characters.
    # NOTE(review): length is checked on the raw text `w`, but segmentation
    # runs on the cleaned `w1` — confirm this asymmetry is intended.
    if len(w)>4:
        danmu=jieba.lcut(w1,cut_all=False)
        # leng feature counts plus the raw danmu text in the last slot.
        li=[0]*(leng+1)
        # enumerate replaces the original hand-rolled index counter.
        for i,cw in enumerate(dicli):
            if cw in danmu:
                li[i]+=1
        li[leng]=w
        vector_.append(li)
#####Above: build the danmu vector set
num=1000#randomly sample 1000 danmu
li_random=random.sample(vector_,num)
# BUG FIX: the original `[[0]*num]*num` aliased ONE row num times, so every
# row of the final matrix was the same list; build independent rows instead.
dis=[[0]*num for _ in range(num)]
close_far=[(0,0),(0,0)]  # [least-similar pair, most-similar pair]
# Renamed from `min`/`max`, which shadowed the builtins.
min_sim=1
max_sim=-1
for i in range(num):
    for k in range(num):
        dis[i][k]=cos_sim(li_random[i],li_random[k])
        if dis[i][k]<min_sim:
            min_sim=dis[i][k]
            close_far[0]=(i,k)
        if dis[i][k]>max_sim and i!=k:  # exclude self-similarity (always 1)
            max_sim=dis[i][k]
            close_far[1]=(i,k)
x1,y1=close_far[0]
x2,y2=close_far[1]
# The raw danmu text sits in slot `leng` of each vector.
print(li_random[x1][leng]+'\t'+li_random[y1][leng]+'\n')
print(li_random[x2][leng]+'\t'+li_random[y2][leng])
#####Above: pairwise distances between danmu

# macOS CJK-capable font — required so the Chinese words render in the cloud.
font =r'/System/Library/Fonts/STHeiti Light.ttc'
print("开始绘制词云图........")
# NOTE(review): the `background` config variable ('词云背景.jpg') is never used;
# this hardcoded path is a different file — confirm which image is intended.
mask=np.array(Image.open('词云背景图.jpeg'))
w=wc.WordCloud(
    font_path=font,
    mask=mask,
    max_words=top,  # was hardcoded 50; use the `top` config constant (still 50)
    max_font_size=100
)
# Build the cloud from the top-1% frequency dict computed earlier.
w.generate_from_frequencies(dict(top_))
w.to_file("top50词云图.png")
plt.figure('词云')                                                               # window title
plt.subplots_adjust(top=0.99,bottom=0.01,right=0.99,left=0.01,hspace=0,wspace=0) # minimal margins
plt.imshow(w, cmap=plt.cm.gray, interpolation='bilinear')                       # render the cloud
plt.axis('off')
plt.show()
#####Above: word-cloud drawing




