# 导入所需的库和模块
from snownlp import SnowNLP
import webbrowser as web
import re
import requests
import csv
import numpy as np
import PySimpleGUI as sg
import os
import wordcloud
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as fm
import pandas as pd
import jieba
from collections import Counter




def getPosition(string, target):
    """Return the index of the first occurrence of *target* in *string*.

    Raises IndexError when *target* does not occur, matching the original
    behavior (list-index on an empty occurrence list) that callers may rely on.
    """
    # Early return instead of materializing every match position first.
    for i, char in enumerate(string):
        if char == target:
            return i
    # Preserve the original failure mode (IndexError from tmpposition[0]).
    raise IndexError('target character not found in string')


def emotion(flag):
    """Run SnowNLP sentiment analysis over the scraped comments.

    Reads comment.csv (flag == 0) or comment2.csv (otherwise), scores every
    comment, writes per-comment results to 情感词分析.csv, draws a bar chart
    of the average sentiment score per product, and returns the absolute
    path of the saved chart image for the GUI to display.
    """
    # Use a Chinese-capable font so product names render correctly.
    plt.rcParams['font.family'] = 'SimHei'

    # Load the raw comments scraped earlier.
    source = 'comment.csv' if flag == 0 else 'comment2.csv'
    comment = pd.read_csv(source, encoding='utf-8-sig')
    items = comment['Comment'].astype(str).tolist()
    product = comment['itemName'].astype(str).tolist()
    commentTime = comment['commentTime'].astype(str).tolist()
    itemId = comment['itemId'].astype(str).tolist()
    itemPage = comment['itemPage'].astype(str).tolist()

    # Score each comment with SnowNLP and bucket it into a sentiment label.
    D = []
    for i in range(len(items)):
        t = SnowNLP(items[i]).sentiments
        if t >= 0.6:
            sentiment = "正面"
        elif t <= 0.4:
            sentiment = "负面"
        else:
            sentiment = "中性"
        D.append([itemId[i], itemPage[i], product[i], commentTime[i], items[i], t, sentiment])

    # Persist the per-comment scores.
    with open('./情感词分析.csv', 'w', encoding='utf-8-sig', newline='') as f1:
        write = csv.writer(f1)
        write.writerow(['itemId', 'itemPage', 'itemName', 'commentTime', 'Comment', 'Sentiment_score', 'Sentiment'])
        write.writerows(D)

    # Re-load and aggregate: average sentiment per product.
    data = pd.read_csv('./情感词分析.csv', encoding='utf-8-sig')
    # BUG FIX: the old code summed scores grouped by itemName but plotted
    # them against data['itemId'].unique(), so bar heights and labels could
    # be misaligned (different order/length).  A single grouped mean keeps
    # x and y consistent and matches the "average" the comment promised.
    avg_scores = data.groupby('itemName')['Sentiment_score'].mean()
    plt.figure(figsize=(12, 6))
    sns.barplot(x=avg_scores.index, y=avg_scores.values)
    plt.title('Sentiment_Score by Products')
    plt.xlabel('Products')
    plt.ylabel('Sentiment_score')
    plt.xticks(rotation=0, fontsize=8)
    plt.tight_layout()
    plt.savefig('sentiment_analysis.png')  # save as PNG
    # Return an absolute path so the GUI can load the image directly.
    image_path = os.path.join(os.getcwd(), 'sentiment_analysis.png')
    return image_path


def cookieManager(s_g):
    """Return the first stored (cookie, user-agent) header set that still works.

    Reads cookie.csv (column 0 = cookie string, column 1 = user agent),
    probes the Tmall rating endpoint with each pair, and returns the headers
    dict of the first one whose response contains a "lastPage" field.
    Pops up a notice and returns None when every stored cookie has expired.
    """
    # BUG FIX: the original opened cookie.csv via open() and never closed the
    # handle; pd.read_csv accepts the path and manages the file itself.
    df = pd.read_csv("cookie.csv")
    data = np.array(df)
    # Probe target and parameters are loop-invariant, so build them once.
    url = "https://rate.tmall.com/list_detail_rate.htm"
    urlparams = {
        "itemId": '544636202128',
        "sellerId": "3",
        "currentPage": str(1),
        "callback": "jsonp723"
    }
    for row in data:
        headers = {
            "Referer": 'https://detail.tmall.com/item.htm?id=544636202128',
            "User-Agent": row[1],
            "cookie": row[0]
        }
        r = requests.get(url, params=urlparams, headers=headers)
        # A parsable "lastPage" means this cookie is still authenticated.
        if re.search(r'"lastPage":(\d+)', r.text) is not None:
            return headers
    s_g.Popup('储存所有cookie失效', font='黑体', text_color='red')  # all stored cookies expired
    return None


def commentSort2(sg, url):  # direct-URL search path
    """Scrape comments for a single product URL and save them to comment2.csv.

    Returns True on success, False when the URL could not be scraped
    (commentSort returned None).
    """
    comments = commentSort(sg, url)
    if comments is None:
        return False
    # newline='' prevents csv from emitting blank rows on Windows.
    with open("comment2.csv", "w", encoding="UTF8", newline='') as file:
        csv_write = csv.writer(file)
        csv_write.writerow(["itemId", "itemPage", "itemName", "commentTime", "Comment"])
        for pair in comments:
            # BUG FIX: the original wrote the whole [date, comment] pair into
            # BOTH the commentTime and Comment columns; split it the same way
            # crawlStep2 does (pair[0] = date, pair[1] = comment text).
            csv_write.writerow([str(0), url, '商品', pair[0], pair[1]])
    return True


def commentSort(s_g, url):
    """Scrape up to 20 pages of Tmall comments for the product in *url*.

    Returns a list of [date, comment] pairs, or None when the URL is
    malformed, no working cookie is available, or the response contains no
    paging information.
    """
    start = url.find('id=')
    end = url.find('&')
    # BUG FIX: the original wrote `url.find('id=')==-1|url.find('&')==-1`;
    # bitwise `|` binds tighter than `==`, so the test silently collapsed to
    # `url.find('id=') == -1` and the missing-'&' case slipped through with
    # end == -1 (truncated itemId).  Validate both markers before slicing.
    if start == -1 or end == -1:
        s_g.Popup('网址输入错误！', font='黑体', text_color='red')
        return None
    # BUG FIX: skip the 'id=' prefix itself so itemId holds only the digits;
    # the request expects a bare id (cf. cookieManager's '544636202128' and
    # the commented-out Referer built as 'id=' + itemId).
    itemId = url[start + 3:end]
    headers = cookieManager(s_g)
    if headers is None:
        # cookieManager already reported the failure via a popup.
        return None
    url = "https://rate.tmall.com/list_detail_rate.htm"
    page = 1
    comLists = []
    dateLists = []
    urlparams = {
        "itemId": itemId,
        "sellerId": "3",
        "currentPage": str(page),
        "callback": "jsonp723"
    }
    r = requests.get(url, params=urlparams, headers=headers)
    match = re.search(r'"lastPage":(\d+)', r.text)
    if match is None:
        # No paging info in the response (blocked or empty result page);
        # the original crashed here on match.group(1).
        return None
    maxPage = int(match.group(1))
    # Crawl at most 20 pages and never past the last page reported.
    while page < 21 and page <= maxPage:
        urlparams["currentPage"] = str(page)
        r = requests.get(url, params=urlparams, headers=headers)
        for rateDate in re.findall('"rateDate":"(.*?)"', r.text):
            dateLists.append(rateDate[0:10])  # keep only the YYYY-MM-DD part
        comLists.extend(re.findall('"rateContent":"(.*?)"', r.text))
        page += 1
    # Pair each date with its comment text.
    finalList = []
    for i in range(len(dateLists)):
        finalList.append([dateLists[i], comLists[i]])
    return finalList


def urlsGet(commodity):
    """Search Taobao/Tmall for *commodity* and return [[detail_url, title], ...].

    Fetches two search-result pages (offsets 0 and 44) and pairs every
    product detail link with its raw title.
    """
    search = commodity
    headers = {
        "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.4031 SLBChan/105',
        "cookie": 'thw=cn; enc=2X5YNT2V7yVmWjyIIly75RFQaHzYcDqH0skxuVbfJ4POWfleJujOkOHFsZNqiDur1Ez1wxKAngKJw7Cc0CMsjVRvlFYZ%2F0dYv9gqV9xUkM0%3D; xlly_s=1; lgc=tb961788446; tracknick=tb961788446; mt=ci=72_1; cna=IJcpHa/hhB4CAXAKgl6lOFnR; t=adfc3d3cb856382ccbb4f0b9ffee6c7c; _m_h5_tk=49cb07b02d8569ab219008b240bfd857_1688448398389; _m_h5_tk_enc=be4aa672b41489b909cd108d76318bde; sgcookie=E100dw4JuJrVmBsK4tGbSyw%2Fr%2BZngpVLaFZr3cIftmexeEoH7n744laKqKkFhPrftZz57X6swbsLV%2BiF06sA5SVUHxpweK7WMl4Gb4axJXaYz88y9RzkPlO%2BL5Ho1LqFzsCb; uc3=lg2=VFC%2FuZ9ayeYq2g%3D%3D&nk2=F5RMHy22%2BJ2UzRE%3D&id2=UUphwo%2BPsORP9Q5%2B5g%3D%3D&vt3=F8dCsGIKcLDxrneFiWU%3D; uc4=nk4=0%40FY4HWrRGRYJFWsYTHqAvXExBHJ1bTg%3D%3D&id4=0%40U2grGReaBrYANAGwqlvaA6wbmJ9ICHDp; _cc_=UtASsssmfA%3D%3D; cookie2=1c19ce3dc6707a157f0bbda40bf8953b; _tb_token_=5ebd3b3838eb5; _samesite_flag_=true; alitrackid=www.tmall.com; lastalitrackid=www.tmall.com; JSESSIONID=B04D1D4F252A240BB25CCB8548B0DB24; l=fBjuc0zeTPR_exNkBOfCnurza77TRIRvjuPzaNbMi9fPOvCH5mRGW1smMo8MCnGVF6rMR3lz63MzBeYBqhcWfdW22j-laiMmne_7Qn5..; isg=BMjIpAfaOEgdKlNyi686QXCmmTbacSx7dZvS-oJ5JMM2XWjHKoFhC0pX1TUtxuRT; tfstk=dKGpHQZlVhx3FJ8JtkpMaD4EKxTMnX3exDufZuqhFcntPlfk8Wvzw0Et4k23Okf8w4nrq03y8zZSV0EoKdAm82PzNnfJiI0eMOFWmBdMIegUa7tmdYl_82o24fZGUO5cJK7AwDc7RLaENqoDgX4LpuCuX_Q2ryeL22Z9_CWaYjcv1bs0MhWBWFBPUJaZiDiZx'
    }
    linkLists = []
    # s=0 and s=44 are the offsets of the first two result pages.
    for i in range(0, 45, 44):
        url = 'https://s.taobao.com/search?fromTmallRedirect=true&page=1&q=' + search + '&spm=875.7931836%2FB.a2227oh.d100&tab=mall&bcoffset=0&p4ppushleft=%2C44&s=' + str(
            i)
        r = requests.get(url, headers=headers)
        names = re.findall('"raw_title":"(.*?)"', r.text)
        links = re.findall('"detail_url":"(.*?)"', r.text)
        # BUG FIX: zip() instead of indexing names[j] — the original raised
        # IndexError whenever a page yielded fewer titles than links.
        for link, title in zip(links, names):
            # The scraped links contain \uXXXX escapes; decode them to text.
            linkLists.append([bytes(link, "utf-8").decode("unicode_escape"), title])
    return linkLists


def crawlStep1(commodity):
    """Start a fresh crawl for *commodity* and return its search-result links.

    Resets openedCount.txt (progress counter) to 0, truncates comment.csv to
    just the header row, then returns the [[url, title], ...] list produced
    by urlsGet().
    """
    # Reset how many items have had their comments fetched so far.
    with open("openedCount.txt", "w") as counter:
        counter.write(str(0))
    # BUG FIX: the original never closed comment.csv, so the header row
    # could stay buffered; a with-block guarantees the flush/close.
    # newline='' prevents csv from emitting blank rows on Windows.
    with open("comment.csv", "w", encoding="UTF8", newline='') as file:
        csv.writer(file).writerow(["itemId", "itemPage", "itemName", "commentTime", "Comment"])
    return urlsGet(commodity)


def month_analyze(sth, s_g, file_path='情感词分析.csv'):
    """Plot per-day sentiment counts, split by month, for product *sth*.

    Reads the sentiment CSV, draws one count-plot per month colored by
    sentiment, pops up warnings for unusual trends, saves the figure as
    '<sth>_time.png' and returns that path.
    """
    data = pd.read_csv(file_path, encoding='utf-8-sig')
    # BUG FIX: .copy() — the original assigned new columns on a filtered
    # VIEW of `data`, which pandas flags (SettingWithCopyWarning) and may
    # silently fail to apply.
    Specify_data = data[data['itemName'] == sth].copy()
    Specify_data['commentTime'] = pd.to_datetime(Specify_data['commentTime'])
    Specify_data['Month'] = Specify_data['commentTime'].dt.month
    Specify_data['Day'] = Specify_data['commentTime'].dt.day

    # Fixed color per sentiment label (green / yellow / dark red).
    palette = {'正面': (67 / 255, 255 / 255, 183 / 255), '中性': (255 / 255, 206 / 255, 70 / 255),
               '负面': (158 / 255, 41 / 255, 39 / 255)}

    fig = sns.catplot(x='Day', hue='Sentiment', col='Month', data=Specify_data, kind='count', palette=palette,
                      alpha=0.7, height=6, aspect=1.2)
    fig.set_axis_labels('Day', 'Comment Count')
    fig.set_xticklabels(rotation=45)

    # Early-warning pass over the days that actually appear in the data.
    x = Specify_data['Day'].unique()

    good_counts = Specify_data[Specify_data['Sentiment'] == '正面']['Day'].value_counts()
    neutral_counts = Specify_data[Specify_data['Sentiment'] == '中性']['Day'].value_counts()
    bad_counts = Specify_data[Specify_data['Sentiment'] == '负面']['Day'].value_counts()

    # Fill in zero counts for days missing from any of the three series so
    # the comparisons below never raise KeyError.
    for day in range(1, 32):
        if day not in good_counts:
            good_counts[day] = 0
        if day not in neutral_counts:
            neutral_counts[day] = 0
        if day not in bad_counts:
            bad_counts[day] = 0

    for i in range(1, len(x)):
        prev = x[i - 1]
        # Warn when positives barely exceed (or trail) negatives.
        if good_counts[prev] > 0 and bad_counts[prev] > 0 \
                and (good_counts[prev] < bad_counts[prev] * 1.1
                     or good_counts[prev] < (bad_counts[prev] + neutral_counts[prev]) * 0.5):
            s_g.Popup('Warning', 'Unusual Trend', title='Warning')  # shown in the GUI
        # Flag days dominated by neutral comments.
        elif neutral_counts[prev] > good_counts[prev] + bad_counts[prev] \
                and neutral_counts[prev] > 0:
            s_g.Popup('Attention', 'Attention Needed', title='Attention')  # shown in the GUI

    fig_path = f'{sth}_time.png'
    fig.savefig(fig_path)
    return fig_path


def count_analyze(sth, s_g, file_path='情感词分析.csv'):
    """Draw a pie chart of the sentiment distribution for product *sth*.

    Saves the chart as '<sth>_count.png', pops up a warning when negative
    or neutral comments dominate, and returns the image path.
    """
    data = pd.read_csv(file_path, encoding='utf-8-sig')
    Specify_data = data[data['itemName'] == sth]
    fig = plt.figure(figsize=(10, 6), dpi=100)
    sizes = Specify_data['Sentiment'].value_counts()
    labels = sizes.index
    palette = {'正面': (67 / 255, 255 / 255, 183 / 255), '中性': (255 / 255, 206 / 255, 70 / 255),
               '负面': (158 / 255, 41 / 255, 39 / 255)}
    # BUG FIX: value_counts() orders labels by frequency, but the original
    # passed palette.values() in dict-insertion order, so slices could get
    # the wrong color; look up each label's color explicitly instead.
    colors = [palette.get(label, 'grey') for label in labels]
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90, colors=colors)
    plt.title('评论类型占比分布')

    plt.axis('equal')
    plt.legend(labels)
    fig_path = f'{sth}_count.png'
    fig.savefig(fig_path)

    # Popup warnings based on the sentiment mix.
    good_count = sizes.get('正面', 0)
    bad_count = sizes.get('负面', 0)
    neutral_count = sizes.get('中性', 0)
    total_count = good_count + bad_count + neutral_count

    if bad_count > good_count or bad_count > (good_count + neutral_count) * 0.5:
        s_g.Popup('Warning', '该商品负面评论较多，需要注意！！', title='Warning')
    elif neutral_count > total_count * 0.5:
        s_g.Popup('Attention', '该商品中立评论较多，需要考虑优化商品', title='Attention')
    return fig_path


def crawlStep2(s_g, urlsLists):
    """Fetch comments for the next five products and append them to comment.csv.

    Resumes from the index stored in openedCount.txt, scrapes up to five
    products' comments via commentSort, then stores the next unread index
    back so subsequent calls continue where this one stopped.
    """
    with open("openedCount.txt", "r") as counter:
        itemCount = int(counter.read())
    next_index = itemCount
    with open("comment.csv", "a", encoding="UTF8", newline='') as file:
        csv_write = csv.writer(file)
        for i in range(itemCount, itemCount + 5):
            if i >= len(urlsLists):
                s_g.Popup('已读完所有数据')  # every search result has been consumed
                break
            next_index = i + 1
            commentLists = commentSort(s_g, urlsLists[i][0])
            # BUG FIX: commentSort returns None on failure; the original
            # crashed on len(None).  Skip the item instead.
            if commentLists is None:
                continue
            for pair in commentLists:
                csv_write.writerow(
                    [str(i + 1), urlsLists[i][0], urlsLists[i][1], pair[0], pair[1]])
    # BUG FIX: the original stored the index of the LAST item processed, so
    # that item was re-crawled on every subsequent call; store the next
    # unread index instead.
    with open("openedCount.txt", "w") as counter:
        counter.write(str(next_index))


def generate_word_cloud(filename, stopwords_filename, item_id, result_dir="wordcloud-result", top_n=100,
                        font_path="./typeface/SourceHanSans-Regular.otf",
                        height=800, width=1200, scale=2, mode="RGBA", background_color='#F8F8F8', colormap="Set2",
                        max_font_size=300, random_state=42):
    """Build a word cloud and a word-frequency bar chart for one product.

    Segments the comments of *item_id* found in *filename* with jieba,
    filters stop words, renders the top-*top_n* words as a word cloud plus a
    top-10 frequency bar chart under *result_dir*, and returns a tuple of
    (top-10 "word: freq" display strings, word-cloud file name,
    bar-chart file name).
    """
    def judge(word):
        # Drop single characters and tokens starting with filler/punctuation.
        if len(word) == 1 or word[0] in ['，', '。', '！', '？', '了', '啊', '啦', '呀', '；']:
            return False
        return True

    # Load stop words into a set: O(1) membership tests in the hot loop
    # below (the original did O(n) list scans per token).
    with open(stopwords_filename, 'r', encoding='utf-8') as f:
        stop_words = set(f.read().split('\n'))

    # Read comments; fall back to gbk for legacy-encoded files.
    try:
        df = pd.read_csv(filename, encoding='utf-8')
    except UnicodeDecodeError:
        df = pd.read_csv(filename, encoding='gbk')
    # Reassign instead of dropna(inplace=True) on a filtered slice, which
    # pandas flags as a chained-assignment hazard.
    df = df[df['itemId'] == item_id].dropna()

    # Segment every comment and keep acceptable, non-stop-word tokens.
    word_list = []
    for contents in df['Comment']:
        for myword in jieba.cut(contents):
            if judge(myword) and myword not in stop_words:
                word_list.append(myword)

    # Word frequencies.  Stop words were already excluded during
    # segmentation, so no second removal pass is needed.
    word_freq = Counter(word_list)

    # Render the word cloud from the top-N frequencies.
    word_dic = {word: float(freq) for word, freq in word_freq.most_common(top_n)}
    wordCloud = wordcloud.WordCloud(font_path=font_path, width=width, height=height, scale=scale,
                                    mode=mode, background_color=background_color, colormap=colormap,
                                    max_words=top_n, max_font_size=max_font_size,
                                    random_state=random_state).generate_from_frequencies(word_dic)

    # Save the word cloud image.
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    file_name = str(item_id) + "-1.png"
    wordCloud.to_file(os.path.join(result_dir, file_name))

    # Render the cloud into the current figure (display was optional).
    plt.imshow(wordCloud)
    plt.axis('off')

    # Top-10 frequencies as display strings for the GUI list box.
    top_n_freq = word_freq.most_common(10)
    result = [f"{word}: {freq}" for word, freq in top_n_freq]

    # Frequency bar chart with a CJK-capable font.
    sns.set_style("whitegrid")
    sns.set_palette("pastel")
    prop = fm.FontProperties(fname=font_path)
    plt.figure(figsize=(12, 8))
    sns.barplot(x=[word for word, freq in top_n_freq], y=[freq for word, freq in top_n_freq])
    plt.xticks(rotation=45, ha="right", fontsize=12, fontproperties=prop)
    plt.xlabel("Words", fontsize=14, fontproperties=prop)
    plt.ylabel("Frequency", fontsize=14, fontproperties=prop)
    plt.title("Word Frequency", fontsize=18, fontproperties=prop)

    freq_file_name = str(item_id) + "-2.png"
    plt.savefig(os.path.join(result_dir, freq_file_name))
    # BUG FIX: close figures so repeated GUI invocations don't accumulate
    # open matplotlib figures (memory leak).
    plt.close('all')

    return result, file_name, freq_file_name

def enter():
    """Run the Tmall comment-analysis GUI event loop.

    Builds the PySimpleGUI window (search page, direct-URL page, analysis
    page, image page) and dispatches button events to the crawling /
    word-cloud / sentiment helpers until the user closes the window.
    """
    list1 = []
    list2 = []
    no_lst = []
    # Build the window layouts.
    sg.theme('Tan')
    # Home-page menu bar.
    menu = [
        ['选择&F', ['商品::-A-', '超链接::-B-']],
        ['退出&G', ['退出::-C-']]
    ]
    # Product-search page.
    layouta = [
        # [sg.Image(filename='2.png')],
        # [sg.Menu(menu, size=(5, 5))],
        [sg.T('请输入要查询的天猫商品：')],
        [sg.In(key='-URL1-')],
        [sg.B('获取不同的商品链接'), sg.B('获取五个商品的评论')],
        [sg.LB(
            list1,
            # disabled=True,
            size=(70, 20),
            select_mode='simple',
            # enable_events=False,
            enable_events=True,
            key='-list1-')],
        [sg.T('第'), sg.Combo(no_lst, size=(10, 10), key='-URL2-'), sg.T('个商品'), sg.B('确定', key='-Y1-'),
         sg.B('打开网页', key='-Web1-'),
         sg.B('分析评论', key='-Ana1-')]
    ]
    # Direct-hyperlink page.
    layoutb = [
        # [sg.Menu(menu, size=(5, 5))],
        [sg.T('请输入商品链接：')],
        [sg.In(key='-URL3-')],
        [sg.B('打开网页', key='-Web2-'), sg.B('分析评论', key='-Ana2-')]

    ]
    # Analysis page (word cloud / sentiment / data analysis).
    layoutc = [
        [sg.Menu(menu, size=(5, 5), key='-Mu2-')
            , sg.T('商品名称：', key='-Text01-')],
        [sg.T('什么颜色：'), sg.In(key='-Colr-')],
        [sg.T('字体大小：'), sg.In(key='-Font-')],
        [sg.B('词云展示', key='-Word-')],
        [sg.LB(
            list2,
            size=(70, 20),
            select_mode='simple',
            key='-list2-',
        )],
        [sg.B('词云分析', key='-Word2-'), sg.B('情感分析', key='-Emo-'),
         sg.B('数据分析', key='-Data-'), sg.B('返回', key='-Rern-')],
    ]
    # Image display page.
    layoutd = [
        [sg.Image(filename='', key='-Image1-')], [sg.Image(filename='', key='-Image2-')],
        [sg.B('返回', key='-Rern2-')]
    ]
    # Combined home layout (search frame + URL frame, toggled via menu).
    layouttotal = [
        [sg.Image(filename='首页.png'), sg.Menu(menu, size=(5, 5), key='-Mu1-'),
         sg.Frame(title='商品', layout=layouta, key='-Frame1-', visible=True),
         sg.Frame(title='超链接', layout=layoutb, key='-Frame2-', visible=False)
         ]
    ]
    layout = [
        [sg.Frame(title='欢迎来到天猫商品评论分析APP', title_color='purple', font=50,
                  layout=layouttotal, title_location='n', visible=True,
                  key='-FrameT1-', relief='flat'),
         sg.Frame(title='商品评论分析', title_location='n', font=50,
                  key='-FrameT2-', layout=layoutc, visible=False, relief='flat'),
         sg.Frame(title='图片展示', title_location='n', font=50,
                  key='-FrameT3-', layout=layoutd, visible=False, relief='flat')
         ]
    ]

    # Create the GUI window.
    window = sg.Window('天猫商品评论分析', layout, font='黑体')  # an icon could be set via icon='ico.ico'

    # Whether product links have been crawled yet.
    flag = False
    # Which page the analysis view was entered from (0 = search, 1 = URL).
    flagnew = 0
    # The crawled search-result links (up to 88 entries).
    List = []
    # Number of products whose comments are crawled; grows by 5 each fetch.
    count = 0
    # Index of the currently selected link.
    var = 0
    result = []
    word_cloud_path = ''
    freq_path = ''
    while True:
        event, values = window.read()
        # print(values)
        if event == sg.WINDOW_CLOSED:
            break
        elif event == '获取不同的商品链接':  # crawl links for the searched product and list them
            if values['-URL1-'] == '':
                sg.Popup('请输入商品！', font='黑体', text_color='red')
            else:
                List = crawlStep1(values['-URL1-'])
                # print(List)
                flag = True
                finalList = []
                # Prefix each title with its 1-based position for display.
                for i in range(len(List)):
                    finalList.append(str(i + 1) + '.' + List[i][1])
                window['-list1-'].update(
                    values=finalList
                )
                # print(values['-URL1-'])
        elif event == '获取五个商品的评论':  # fetch comments for the next five products
            if flag == False:
                sg.Popup('请输入商品！', font='黑体', text_color='red')
            else:
                crawlStep2(sg, List)
                count += 5
                # Refresh the combo with the indexes now available.
                LenList = []
                for i in range(count):
                    LenList.append(str(i + 1))
                window['-URL2-'].update(
                    values=LenList
                    # no_lst=LenList
                    # value=count
                )
                sg.Popup('成功获取五个商品的评论', font='黑体')
        elif event == '-Y1-':  # confirm selection from the list box
            if flag == False:
                sg.Popup('请输入商品！', font='黑体', text_color='red')
            else:
                # Extract the leading index number before the first '.'.
                tmp = values['-list1-'][0]
                x = getPosition(tmp, '.')
                var = tmp[0:x]
                window['-URL2-'].update(
                    value=var
                )
        elif event == '-Web1-':  # open the selected product page in a browser
            if flag == False:
                sg.Popup('请输入商品！', font='黑体', text_color='red')
            # values[]
            else:
                tmp1 = values['-URL2-']
                if tmp1 == '':
                    sg.Popup('未获取商品', font='黑体', text_color='red')
                elif int(tmp1) > 88:
                    sg.Popup('超过88个')
                else:
                    web.open(List[int(tmp1) - 1][0])
        elif event == '-Y2-':
            break
        elif event == '商品::-A-':  # menu: switch to the search frame
            window['-Frame1-'].update(visible=True)
            window['-Frame2-'].update(visible=False)
        elif event == '超链接::-B-':  # menu: switch to the direct-URL frame
            window['-Frame1-'].update(visible=False)
            window['-Frame2-'].update(visible=True)
        elif event == '退出::-C-':  # menu: quit after confirmation
            eventtmp = sg.popup_ok_cancel('确定要退出吗？', title='提示', font='黑体')
            if eventtmp == 'OK':
                break
        #
        elif event == '-Web2-':  # open a user-supplied URL
            if values['-URL3-'] == '':
                sg.Popup('请输入超链接！', font='黑体', text_color='red')
            else:
                web.open(values['-URL3-'])  # TODO: validate the link before opening
        elif event == '-Ana2-':  # analyze comments of a user-supplied URL
            if values['-URL3-'] == '':
                sg.Popup('请输入超链接！', font='黑体', text_color='red')
            else:
                judge=commentSort2(sg, values['-URL3-'])
                if judge==True:
                    flagnew = 1
                    window['-Frame1-'].update(visible=False)
                    window['-Frame2-'].update(visible=True)
                    window['-FrameT1-'].update(visible=False)
                    window['-FrameT2-'].update(visible=True)
                    window['-FrameT3-'].update(visible=False)
        elif event == '-Ana1-':  # analyze comments of a searched product
            if flag == False:
                sg.Popup('请输入商品！', font='黑体', text_color='red')
            elif count == 0:
                sg.Popup('未获取评论！', font='黑体', text_color='red')
            elif values['-URL2-'] == '':
                sg.Popup('未获取商品！', font='黑体', text_color='red')
            elif int(values['-URL2-']) > count:
                sg.Popup(f'当前商品位置超过{count}个！', font='黑体', text_color='red')
            else:
                window['-Text01-'].update(
                    value='商品名称：' + values['-URL1-']
                )
                flagnew = 0
                window['-Frame1-'].update(visible=False)
                window['-Frame2-'].update(visible=False)
                window['-FrameT1-'].update(visible=False)
                window['-FrameT2-'].update(visible=True)
                window['-FrameT3-'].update(visible=False)


        elif event == '-Word-':  # generate and list the word cloud
            if values['-Colr-'] == '':
                sg.Popup('请输入字体颜色！', font='黑体', text_color='red')
            elif values['-Font-'] == '':
                sg.Popup('请输入字体大小！', font='黑体', text_color='red')
            else:
                # flagnew selects the CSV written by the page we came from.
                if flagnew == 0:
                    result, word_cloud_path, freq_path = generate_word_cloud("comment.csv", "stopwords.txt",
                                                                             int(values['-URL2-']),
                                                                             result_dir="wordcloud-result", top_n=100,
                                                                             font_path="./typeface/SourceHanSans-Regular.otf",
                                                                             height=1600, width=2400, scale=3,
                                                                             mode="RGBA",
                                                                             background_color='#252525',
                                                                             colormap=values['-Colr-'],
                                                                             max_font_size=int(values['-Font-']),
                                                                             random_state=42)
                elif flagnew == 1:
                    result, word_cloud_path, freq_path = generate_word_cloud("comment2.csv", "stopwords.txt",
                                                                             0,
                                                                             result_dir="wordcloud-result", top_n=100,
                                                                             font_path="./typeface/SourceHanSans-Regular.otf",
                                                                             height=1600, width=2400, scale=3,
                                                                             mode="RGBA",
                                                                             background_color='#252525',
                                                                             colormap=values['-Colr-'],
                                                                             max_font_size=int(values['-Font-']),
                                                                             random_state=42)
                if result == []:
                    sg.Popup('获取词云失败！', font='黑体', text_color='red')
                else:
                    window['-list2-'].update(
                        values=result
                    )
                    window['-Frame1-'].update(visible=False)
                    window['-Frame2-'].update(visible=False)
                    window['-FrameT1-'].update(visible=False)
                    window['-FrameT2-'].update(visible=True)
                    window['-FrameT3-'].update(visible=False)
        elif event == '-Rern-':  # back to the home frame
            window['-FrameT1-'].update(visible=True)
            window['-FrameT2-'].update(visible=False)
            window['-FrameT3-'].update(visible=False)
        elif event == '-Word2-':  # open the saved word-cloud images
            # time.sleep(20000)
            if (word_cloud_path == '') or (freq_path == ''):
                sg.Popup('未获取词云！', font='黑体', text_color='red')
            else:
                web.open(os.path.join("wordcloud-result", word_cloud_path))
                web.open(os.path.join("wordcloud-result", freq_path))


        elif event == '-Data-':  # month/count data analysis (search path only)
            if flagnew == 0:
                window['-Image1-'].update(
                    filename=month_analyze(List[int(values['-URL2-']) - 1][1], sg)
                )
                window['-Image2-'].update(
                    filename=count_analyze(List[int(values['-URL2-']) - 1][1], sg)
                )
                window['-FrameT1-'].update(visible=False)
                window['-FrameT2-'].update(visible=False)
                window['-FrameT3-'].update(visible=True)
            elif flagnew == 1:
                sg.Popup('抱歉，无法分析！', font='黑体', text_color='red')

        elif event == '-Emo-':  # sentiment analysis chart
            window['-Image1-'].update(
                filename=emotion(flagnew)
            )
            window['-Image2-'].update(
                filename=''
            )
            window['-FrameT1-'].update(visible=False)
            window['-FrameT2-'].update(visible=False)
            window['-FrameT3-'].update(visible=True)
        elif event == '-Rern2-':  # back from image page to analysis page
            window['-FrameT1-'].update(visible=False)
            window['-FrameT2-'].update(visible=True)
            window['-FrameT3-'].update(visible=False)

    window.close()

# Script entry point.
if __name__ == "__main__":
    # Only launch the GUI when executed directly, not when imported.
    enter()