#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May  7 14:47:38 2022

@author: cythnia
"""

#——————————————————————————————————————————————————————————#
#txt文件文本分析
#——————————————————————————————————————————————————————————#
#导入工具包
import jieba
import os
import pandas as pd
import numpy as np
#——— working paths and keyword list ———#
# Raw .txt folder (also becomes the CWD) and the cleaned-copy folder.
path = '/Users/cythnia/Desktop/txt/'
path1 = '/Users/cythnia/Desktop/txt2/'
os.chdir(path)
# Read the keyword spreadsheet, echo it, and keep the '关键词'
# column as a plain Python list for later lookups.
keyword_table = pd.read_excel('/Users/cythnia/Desktop/数字化转型.xlsx')
print(keyword_table)
lis = list(keyword_table['关键词'])
#创建空列表存取文件名
# lis1=[]
# #读取txt文件夹列表
# name=os.listdir(path)
# for names in name:
#     if names.endswith('.txt'):
#         lis1.append(names)
# print(lis1)         
# #循环读取txt文件并去除换行符
# for wenjian in lis1:
#     outPutString='' 
#     # print(wenjian)
#     x=wenjian.split('年')[0]
#     fw=open(path1+'%s'%wenjian,'w',encoding="utf-8")
#     f=open('%s'%wenjian,'r',encoding='utf-8')
#     lines=f.readlines()
#     for i in range(len(lines)):
#         if lines[i].endswith('\n'):
#             lines[i]=lines[i][:-1] #将字符串末尾的换行符去掉
#     for j in range(len(lines)):
#         outPutString+=lines[j]
#     fw.write(outPutString)    
#     # text=open('2.txt','r').read()
#     # print(text)
# Register every user-defined keyword with jieba so the tokenizer
# keeps each one as a single token instead of splitting it.
# (Direct iteration replaces the original index-based range(len(...)) loop.)
wordsByMyself = lis
for keyword in wordsByMyself:
    jieba.add_word(keyword)
# Read the folder of newline-stripped files and keep only the
# .txt entries (comprehension replaces the manual append loop).
textlis = os.listdir(path1)
lis2 = [name for name in textlis if name.endswith('.txt')]
print(lis2)
# One row per report file; the single cell holds the comma-joined
# frequency of every user-defined keyword.
tongji = pd.DataFrame(columns=['关键词'])

# Tokenise each cleaned file and count the keyword frequencies.
# NOTE(review): the original sliced lis2[2:3] (a debugging leftover
# that processed a single file) although its own comment says every
# file should be read — the slice is removed here.
# NOTE(review): the original also loaded 哈工大停用词表.txt on every
# iteration to build a filtered string that was never used, so the
# counts below were always taken from the unfiltered token list;
# that dead code is removed without changing the output.
for texts in lis2:
    # 'with' guarantees the file handle is closed (the original
    # leaked an open handle per file).
    with open(path1 + texts, encoding='utf-8') as fh:
        texti = jieba.lcut(fh.read())

    # Count every token longer than one character.
    counts = {}
    for word in texti:
        if len(word) > 1:
            counts[word] = counts.get(word, 0) + 1

    # Print and collect the frequency of each keyword (0 if absent).
    print(texts + '关键词频结果：——————————————————————————————————')
    text2 = []
    for keyword in wordsByMyself:
        freq = counts.get(keyword, 0)
        print(keyword + ':' + str(freq))
        text2.append(str(freq))

    # Store this file's counts as one comma-separated cell.
    tongji.loc[texts] = ','.join(text2)

tongji.to_csv('/Users/cythnia/Desktop/词频.csv')



#——————————————————————————————————————————————————————————#
# Reshape the raw frequency CSV: one column per keyword count,
# plus security code / year / company name parsed from the row label.
#——————————————————————————————————————————————————————————#
data = pd.read_csv('/Users/cythnia/Desktop/词频.csv')

# Split the comma-joined counts into one integer-labelled column per
# keyword.  expand=True adapts to however many keywords were counted
# (the original hard-coded range(0, 76)); the labels produced are the
# same integers 0..n-1.
split_counts = data['关键词'].str.split(',', expand=True)
for col in split_counts.columns:
    data[col] = split_counts[col]
data.info()

# The joined cell is no longer needed once it has been expanded.
data.drop('关键词', axis=1, inplace=True)
data.head()
data['Unnamed: 0'].head()

# Row labels carry the source filename: keep the part before '年',
# then split on '：' — the first half fuses security code and company
# name, the second half contains the year digits.
data['Unnamed: 0'] = data['Unnamed: 0'].str.split('年').str[0]
data['证券代码'] = data['Unnamed: 0'].str.split('：').str[0]
data['年份'] = data['Unnamed: 0'].str.split('：').str[1].str.extract(r'(\d+)')
# Extract the company name (non-digit run) BEFORE overwriting
# 证券代码 with its digits-only form — the order matters.
data['公司名称'] = data['证券代码'].str.extract(r'(\D+)')
data['证券代码'] = data['证券代码'].str.extract(r'(\d+)')
data.drop('Unnamed: 0', axis=1, inplace=True)
data.to_csv('/Users/cythnia/Desktop/词频1.csv')




# Relabel the count columns with the keyword text and merge them with
# the identifying columns into the final table.
data1 = pd.read_csv('/Users/cythnia/Desktop/词频1.csv')
data1.drop('Unnamed: 0', axis=1, inplace=True)  # index column from to_csv

# After the CSV round-trip the count columns have stringified integer
# labels ('0'..'75'); select them by pattern instead of the hard-coded
# '0':'75' slice so the keyword count can vary.
count_cols = [c for c in data1.columns if c.isdigit()]
date = data1[count_cols]
datee = data1.loc[:, '证券代码':'公司名称']

# Assign the keyword list directly: the original wrote
# date.columns = [lis], which wraps the labels in another list and
# builds a one-level MultiIndex instead of a plain Index.
date.columns = lis
result = pd.concat([datee, date], axis=1)
print(result)
result.to_csv('/Users/cythnia/Desktop/关键词.csv')
# print(text2)

# #对文件进行分词
# text=jieba.lcut(text)
# text2=[]
# #筛选字数大于2的词，并添加到text2中
# for word in text1:
#     if(len(word))>=2:
#         text2.append(word)
# print(text2)