

import json
import time
import traceback
import pymysql
from pymysql.converters import escape_string
import SQL_CRUD_Interface
import Tools_Interface
import re
import emoji
import requests
from collections import Counter
import os
from collections import defaultdict
import operator


def check_words(word):
    """Placeholder word-validation hook.

    Currently unimplemented — always returns None and is never called
    anywhere in this script. NOTE(review): presumably intended to filter
    candidate words; confirm before removing.
    """
    pass

# Module-level accumulators shared by the script body below.
mylist = []  # tokens with no uppercase letters, harvested from the wiki text files
myDict = {}  # word -> occurrence count, filled near the end of the script
total_content = ""  # concatenated raw text of every input file



def get_txt_content(path):
    """Return the entire text of *path* decoded as UTF-8."""
    handle = open(path, 'r', encoding="utf-8")
    try:
        return handle.read()
    finally:
        handle.close()


# Read wiki dump files 0.txt .. 7.txt and prepend each one's text (with a
# dash separator) onto the running total_content string.
for file_no in range(8):
    txt_path = rf"C:\Users\QAQ\Desktop\wiki_txt\{file_no}.txt"
    print(txt_path)
    time.sleep(1)
    total_content = get_txt_content(txt_path) + "   --  --   --   --   --    " + total_content


# "Delay 5 s"
print("延时 5 s ")
time.sleep(5)


# Strip out everything that is not an English word, then keep only the
# tokens that contain no uppercase letters.
txt_content = Tools_Interface.remove_all_NotEnglish(total_content)
list1 = txt_content.split(" ")
mylist.extend(
    token for token in list1
    if not Tools_Interface.bool_Include_Capital_letter(token)
)



# NOTE(review): inert example snippet — this is a bare triple-quoted string
# expression, never assigned or executed as code; kept for reference only.
'''
from collections import Counter
list_x = [2, 3, 4, 6, 6, 7, 3, 2, 8, 3, 6]
c = Counter(list_x)
result = c.most_common(3)    # 得到出现频度最高的3个元素
print(c)
print(result)
'''




# Unique vocabulary, plus a spot-check count for one word.
myset = set(mylist)
print(" count apple is :  " + str(mylist.count("apple")))


# Rank words by frequency; most_common(15000) yields the top 15000
# (word, count) pairs, highest count first.
c = Counter(mylist)
result = c.most_common(15000)


# Print only the words, in descending frequency order.
for word, _count in result:
    print(word)


print("stopping ... ")
# Effectively halts the script here: five sequential sleeps of 86787 s
# (~24.1 h each, ~120 h total), so everything below is practically
# unreachable during a normal run.
for _ in range(5):
    time.sleep(86787)

# Build the word -> frequency map in a single pass.
#
# The original iterated over myset and called mylist.count() for every
# unique word; each count() rescans the whole list, making the loop
# O(unique * total) — quadratic in practice on a large corpus. Counter
# tallies identical frequencies in one O(total) pass, and its key set is
# exactly set(mylist), which equals myset here (myset was built from
# mylist above and mylist is unchanged since).
myDict.update(Counter(mylist))





# Sort (word, count) pairs by count, most frequent first, and print each
# with its 1-based rank.
list1 = sorted(myDict.items(), key=operator.itemgetter(1), reverse=True)
for index, (word, freq) in enumerate(list1, start=1):
    print(" {} ---   {}  ---  {}".format(index, freq, word))




