# coding = UTF-8
# @Time : 2022/06/20 14:41
# @Author : PP_YY
# @File : main.py
# @Description :
# 1．输入两个URL，抓取两个网页的HTML源码；
# 2．从抓取到的HTML源码中解析出文章内容，并显示；
# 3．计算每篇文章对应的TF向量，并显示；
# 4．计算两篇文章的相似度。
# --- standard library ---
import os
import re  # regular expressions for HTML/text extraction
import threading
import time
import tkinter
import urllib.parse  # URL handling
import urllib.request  # fetch pages by URL
from collections import Counter  # word-frequency counting for TF

# --- third-party ---
import jieba  # Chinese word segmentation
import numpy
from bs4 import BeautifulSoup  # HTML parsing


# --- top-level GUI window and widgets ---
top = tkinter.Tk()
top.title("余弦相似度计算")
top.geometry('500x300')
top.resizable(0, 0)  # fixed-size window
entry_string1 = tkinter.Entry(top, bd=1,width=50)  # input field for the first URL
entry_string2 = tkinter.Entry(top, bd=1,width=50)  # input field for the second URL
# Text box that displays the similarity result.
# NOTE(review): Tk's documented value for `state` is "disabled", not "disable";
# Tk accepts unambiguous abbreviations, but verify on the target platform.
show_data = tkinter.Text(top,bd=0,font=('等线', 10),highlightthickness=0,state="disable",width=56,height=5)

# --- pre-compiled regular expressions used by the scraping helpers ---
find_p = re.compile(r'<p(.*?)</p>') # a <p ...>...</p> span; non-greedy, attrs included; '.' does not cross newlines (no re.S)
findChinese = re.compile(r'\s*\w*\s*[\u4e00-\u9fa5]+\s*\w*\s*')  # runs that contain CJK characters
findSpace = re.compile(r'\s+') # any run of whitespace


def askurl(url):
    """Open *url* with a browser-like User-Agent and return the response object.

    Returns None when the request fails (the error is printed). The original
    caught URLError before HTTPError — HTTPError is a subclass of URLError,
    so that branch was unreachable — and returned an unbound local
    (NameError) on any failure.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/79.0.3945.147 Safari/534.24 Device/elish Model/M2105K81AC XiaoMi/MiuiBrowser/14.6.50"}
    request_t = urllib.request.Request(url, headers=head)
    html = None
    try:
        html = urllib.request.urlopen(request_t)
    except urllib.error.HTTPError as err:  # must come before URLError (subclass)
        print(err)
    except urllib.error.URLError as err:
        print(err)
    return html


def get_html_data(_baseurl):
    """Download *_baseurl* and return (page_source, p_fragments).

    page_source is the full HTML as a string; p_fragments is the list of
    raw <p ...>...</p> captures found by the module-level `find_p` pattern.
    The original initialized dead locals (`datalist = []`, `url = _baseurl`)
    that were immediately overwritten.
    """
    html = askurl(_baseurl)
    soup = str(BeautifulSoup(html, "html.parser"))
    datalist = re.findall(find_p, soup)
    return soup, datalist

def do_match(datalist):
    """Reduce every entry of *datalist* to its Chinese-bearing fragments.

    Each entry is replaced in place by the concatenation of all matches of
    the module-level `findChinese` pattern; the (mutated) list is returned.
    """
    for idx, fragment in enumerate(datalist):
        datalist[idx] = ''.join(re.findall(findChinese, fragment))
    return datalist


def get_text(datalist):
    """Join the extracted fragments into one newline-separated article string."""
    return '\n'.join(datalist)


def get_list(text):
    """Segment *text* into words and build a deduplicated vocabulary.

    Whitespace is stripped out first, the text is cut with jieba, stop-words
    are removed via `black_list`, and numpy.unique produces the sorted
    vocabulary. Returns (word_list, unique_word_list).
    """
    compact = re.sub(findSpace, '', text)
    words = black_list(list(jieba.lcut(compact)))
    vocabulary = list(numpy.unique(words))
    return words, vocabulary


def calculate_TF(str_to_cal, list2):
    """Term frequency of every word of *list2* within the word list *str_to_cal*.

    Returns a list of len(list2) floats: count(word) / len(str_to_cal).
    An empty *str_to_cal* yields all zeros (the original raised
    ZeroDivisionError). A single Counter pass replaces the original's
    repeated list.count(), which was O(n*m).
    """
    denominator = len(str_to_cal)
    if denominator == 0:
        return [0.0] * len(list2)
    counts = Counter(str_to_cal)
    return [counts[word] / denominator for word in list2]


def calculate_cos_similar(A, B):
    """Cosine similarity of equal-length numeric vectors A and B.

    Sentinels kept for existing callers: returns 2 when the lengths differ
    (main() treats 2 as "bad input"), 1 when the vectors compare equal.
    Returns 0.0 when either vector has zero norm (the original raised
    ZeroDivisionError in that case); otherwise dot(A, B) / (|A| * |B|).
    """
    if len(A) != len(B):
        return 2
    if A == B:
        return 1
    dot = sum(a * b for a, b in zip(A, B))
    norm_a = sum(a * a for a in A) ** 0.5
    norm_b = sum(b * b for b in B) ** 0.5
    if norm_a == 0 or norm_b == 0:
        return 0.0  # undefined for a zero vector; report no similarity
    return dot / (norm_a * norm_b)

# Stop-words removed before TF computation: pronouns, particles, and site
# boilerplate ("新浪", "版权所有", ...). frozenset gives O(1) membership tests.
_STOP_WORDS = frozenset(['的', '你', '我', '他', '她', '它', '他们', '她们', '它们', '新浪', '公司', '版权所有', 'Reserved', '是', '而', '这', '这些', '这个', '都', '一起', '与', '和', '并', '中', '也', '了', '在', '被', '把', '对', '让', '使用', '着', '成为', '里', '能', '用'])


def black_list(list1):
    """Return *list1* with stop-words removed, order preserved.

    The original rebuilt the stop-word LIST on every call (O(n) scan per
    word) and shadowed the function's own name with a local variable.
    """
    return [word for word in list1 if word not in _STOP_WORDS]


def open_file(path):
    """Open *path* in Notepad (Windows-only) when the file exists.

    Returns True when the file exists and the command was issued, False
    otherwise. The path is quoted so names containing spaces still work;
    the original concatenated it bare into the shell command.
    """
    if not os.path.exists(path):
        return False
    os.system('Notepad "{}"'.format(path))
    return True


def main():
    """Handler for the "计算" button.

    Reads the two URLs from the entry widgets, scrapes and segments both
    articles, computes their cosine similarity over a merged vocabulary,
    shows the result in the GUI, writes the article texts and TF vectors to
    result files, and opens those files in Notepad.
    """
    global show_data, entry_string2, entry_string1
    url1 = entry_string1.get()
    url2 = entry_string2.get()

    # Fetch both pages and extract their <p>-tag fragments.
    soup, datalist = get_html_data(url2)
    soup1, datalist1 = get_html_data(url1)

    # Keep only the Chinese text of every fragment.
    datalist = do_match(datalist)
    datalist1 = do_match(datalist1)

    # Assemble each article, segment it, and drop stop-words.
    text_str = get_text(datalist)
    text_list, text_list_unique = get_list(text_str)
    text_str1 = get_text(datalist1)
    text_list1, text1_list_unique = get_list(text_str1)

    # Merged vocabulary of both articles.
    garage = list(numpy.unique(text_list + text_list1))

    # Per-article TF vectors over each article's own vocabulary (for the
    # result_TF.txt report).
    TF_list1 = calculate_TF(text_list, text_list_unique)
    TF_list2 = calculate_TF(text_list1, text1_list_unique)
    TF_dict1 = dict(zip(text_list_unique, TF_list1))
    TF_dict2 = dict(zip(text1_list_unique, TF_list2))

    # TF vectors over the merged vocabulary so both have equal dimensions.
    TF_similar_list = calculate_TF(text_list, garage)
    TF_similar_list1 = calculate_TF(text_list1, garage)

    # Cosine similarity; the sentinel 2 means the vector sizes differed.
    similar = calculate_cos_similar(TF_similar_list, TF_similar_list1)

    show_text = "输入的数据不对！" if similar == 2 else str(similar)
    # The Text widget is kept read-only; enable it just long enough to update.
    show_data.config(state="normal")
    show_data.delete('1.0', 'end')
    show_data.insert('1.0', show_text)

    path1 = 'result.txt'
    path2 = 'result_TF.txt'

    # Write the article bodies; `with` guarantees the file is closed even on error.
    with open(path1, 'w+', encoding='UTF-8') as f:
        f.write("文章1：\n")
        f.write(text_str)
        f.write("\r\n\r\n文章2：\n")
        f.write(text_str1)

    # Write the per-article TF dictionaries.
    with open(path2, 'w+', encoding='UTF-8') as f1:
        f1.write("文章1：\n")
        f1.write(str(TF_dict1))
        f1.write("\r\n\r\n文章2：\n")
        f1.write(str(TF_dict2))

    # Open both result files in Notepad, staggered so the windows don't collide.
    threading.Thread(target=open_file, args=(path1,)).start()
    time.sleep(1)
    threading.Thread(target=open_file, args=(path2,)).start()


def GUI_main():
    """Lay out the widgets on the main window and enter the Tk event loop."""
    global entry_string1, top, show_data, entry_string2
    hint = tkinter.Label(top, text="请输入两个向量,用\",\"隔开", width=40, font=('等线', 12))
    result_caption = tkinter.Label(top, text="余弦相似度为:", font=('等线', 8))
    run_button = tkinter.Button(top, text="计算", command=main, bd=1, font=('等线', 12))
    # Absolute placement mirrors the fixed 500x300, non-resizable window.
    hint.place(x=-80, y=10)
    entry_string1.place(x=15, y=40)
    entry_string2.place(x=15, y=70)
    run_button.place(x=380, y=50)
    result_caption.place(x=15, y=100)
    show_data.place(x=15, y=120)
    top.mainloop()


# Launch the GUI only when executed as a script (not on import).
if __name__ == "__main__":
    GUI_main()