from collections import Counter
import jieba
from flask import Flask, render_template, request
from snownlp import SnowNLP
from select_database import query_database,query
import copy
import numpy as np
import random
from sklearn.linear_model import LinearRegression
import time

# Flask application instance; the dashboard routes below are registered on it.
app = Flask(__name__)

# Regression forecast
def predict_linear():
    """Fit a linear regression over historical prices and return fitted values.

    Returns:
        list[float]: the model's predicted price for each time step 1..n,
        in the same order as the historical rows.
    """
    # Target variable: historical prices coerced to float.
    y = [float(v) for v in get_history_data()["price"].tolist()]

    # Feature: the time-step index as an (n, 1) column vector.
    x = np.arange(1, len(y) + 1).reshape(-1, 1)

    # The original fitted the model twice back-to-back; fitting once suffices.
    model = LinearRegression().fit(x, y)

    return list(model.predict(x))

# Sentiment analysis and product price-interval distribution
def get_data(df, df_con, path):
    """Classify comment sentiment and bucket product prices into intervals.

    Args:
        df: product DataFrame with a numeric 'price' column.
        df_con: comment DataFrame with a 'con' (comment text) column.
        path: category label used as a prefix in the interval names, or None
            for the aggregate ('综合') view.

    Returns:
        tuple: (positive_count, negative_count, neutral_count,
                data_price_interval) where data_price_interval is a list of
                {'name': label, 'value': count} dicts, one per price bucket.
    """
    # SnowNLP sentiment score: > 0.6 positive, < 0.4 negative, else neutral.
    positive_count = 0
    negative_count = 0
    neutral_count = 0
    for comment in df_con['con']:
        score = SnowNLP(comment).sentiments
        if score > 0.6:
            positive_count += 1
        elif score < 0.4:
            negative_count += 1
        else:
            neutral_count += 1

    # Price buckets: inclusive upper bounds; anything above the last bound
    # falls into the final "50000+" bucket. The original allocated ten lists
    # but never filled index 5, so 5000~10000 prices landed one slot too far
    # right, every later label was zipped with the wrong count, and the
    # 50000+ bucket was silently dropped (10 counts vs 9 labels).
    upper_bounds = [1000, 2000, 3000, 4000, 5000, 10000, 20000, 50000]
    counts = [0] * (len(upper_bounds) + 1)
    for price in df['price'].values.tolist():
        for idx, bound in enumerate(upper_bounds):
            if price <= bound:
                counts[idx] += 1
                break
        else:
            counts[-1] += 1

    if path is None:
        path = '综合'
    data_col = [f'{path}价格0~1000元',
                f'{path}价格1000~2000元',
                f'{path}价格2000~3000元',
                f'{path}价格3000~4000元',
                f'{path}价格4000~5000元',
                f'{path}价格5000~10000元',
                f'{path}价格10000~20000元',
                f'{path}价格20000~50000元',
                f'{path}价格50000元以上的', ]
    data_price_interval = [{'name': name, 'value': count}
                           for name, count in zip(data_col, counts)]

    return positive_count, negative_count, neutral_count, data_price_interval

# Fetch the product's historical prices for the forecast model
def get_history_data():
    """Return a DataFrame of up to 20 historical-price rows, ordered by id.

    NOTE(review): `limit 1,20` starts at offset 1 and therefore skips the
    first row — confirm that is intended.
    """
    # Local renamed from `query` to stop shadowing the imported `query` function.
    sql = "select * from 历史价格  ORDER BY id  limit 1,20"
    return query_database(sql)

def getWordCloud(df):
    """Build word-frequency pairs for the word cloud from product titles.

    Args:
        df: DataFrame with a 'title' column of product title strings.

    Returns:
        list[dict]: [{'name': word, 'value': frequency}, ...] sorted by
        descending frequency.
    """
    titles = df['title'].tolist()

    # Load the stop-word list, one word per line.
    with open(r'./StopWords.txt', 'r', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}

    # Segment each non-empty title (spaces stripped) and count token frequencies.
    tokens = []
    for title in titles:
        if title:
            tokens.extend(jieba.lcut(title.replace(' ', '')))
    counts = Counter(token for token in tokens if token not in stopwords)

    # most_common() with no limit yields every word, highest frequency first.
    return [{'name': word, 'value': freq} for word, freq in counts.most_common()]

def getMapData(df_addr):
    """Aggregate sales per province for the map chart.

    Args:
        df_addr: DataFrame with '发货地' (ship-from, "province city") and
            '销量' (sales) columns.

    Returns:
        list[dict]: [{'name': province, 'value': total_sales}, ...]
    """
    # Sum sales per full ship-from string first.
    addr_totals = df_addr[['发货地', '销量']].groupby('发货地')['销量'].sum()

    # Collapse "province city" to the province (text before the first space).
    province_totals = {}
    for place, amount in addr_totals.to_dict().items():
        province = place.split(' ')[0]
        # dict.get with a default replaces the original truthiness test,
        # which would have treated a stored total of 0 as a missing key.
        province_totals[province] = province_totals.get(province, 0) + amount

    return [{'name': name, 'value': total}
            for name, total in province_totals.items()]

def get_sal_pri_data(df2):
    """Relate price to total sales per product type (for the line chart).

    Args:
        df2: DataFrame with 'type', 'sales' (strings like '100+', '2万') and
            'price' columns. Mutated in place (dropna/sales rewrite) — the
            caller passes a deep copy.

    Returns:
        list[dict]: [{'name': type, 'data': [[price, total_sales], ...]}, ...]
    """
    df2.dropna(inplace=True)
    # Normalize sales strings: drop the '+' suffix, expand '万' to *10000.
    # regex=False is required: with regex=True a bare '+' is an invalid
    # pattern ("nothing to repeat") and Series.str.replace raises re.error.
    df2['sales'] = df2['sales'].str.replace('+', '', regex=False)
    # NOTE(review): values like '1.5万' would break int() here — confirm the
    # scraper only emits integral counts before '万'.
    df2['sales'] = df2['sales'].str.replace('万', '0000', regex=False).astype(int)
    df2 = df2.sort_values(by='price')[['type', 'sales', 'price']]
    # Drop rows with zero or missing sales.
    data = df2[(df2['sales'] != 0) & (df2['sales'].notnull())]
    # Drop prices above 10000. NOTE(review): the original comment claimed a
    # 50000 cutoff while the code used 10000 — confirm the intended threshold.
    data = data[data['price'] <= 10000]
    # Total sales per (type, price) pair.
    grouped = data.groupby(['type', 'price'])['sales'].sum().reset_index()
    sal_pri_data = []
    for t in grouped['type'].unique():
        series = grouped[grouped['type'] == t][['price', 'sales']]
        sal_pri_data.append({'name': t, 'data': series.values.tolist()})

    return sal_pri_data

def getSails(df):
    """Total sales per product type.

    Args:
        df: DataFrame with 'type' and 'sales' (strings like '100+', '2万',
            possibly empty/None) columns.

    Returns:
        tuple: (list of type names in insertion order,
                list of {'name': type, 'value': total} dicts).
    """
    totals = {}
    for type_name, raw_sales in df[['type', 'sales']].values.tolist():
        # Empty/None sales count as zero; expand '万' and strip the '+' suffix.
        normalized = raw_sales.replace('万', '0000').replace('+', '') if raw_sales else '0'
        totals[type_name] = totals.get(type_name, 0) + int(normalized)

    sales_key = list(totals)
    sales_data = [{'name': name, 'value': total} for name, total in totals.items()]

    return sales_key, sales_data

def getTopShop(df):
    """Top-10 shops by total sales.

    Args:
        df: DataFrame with 'shop' and 'sales' (strings like '100+', '2万',
            possibly empty/None) columns.

    Returns:
        tuple: (list of trimmed shop names,
                list of {'name': shop, 'value': total} dicts, both sorted by
                descending total and capped at 10 entries).
    """
    totals = {}
    for shop, raw_sales in df[['shop', 'sales']].values.tolist():
        # Empty/None sales count as zero; expand '万' and strip the '+' suffix.
        normalized = raw_sales.replace('万', '0000').replace('+', '') if raw_sales else '0'
        totals[shop] = totals.get(shop, 0) + int(normalized)

    shop_data = [{'name': name, 'value': total} for name, total in totals.items()]
    sorted_list = sorted(shop_data, key=lambda item: item['value'], reverse=True)[:10]
    # Trim the trailing 7 characters of each shop name — presumably a fixed
    # "官方旗舰店"-style suffix; confirm against real shop names.
    shop_key = [entry['name'][:-7] for entry in sorted_list]

    return shop_key, sorted_list

# Page endpoint
@app.route('/')
def index():
    """Render the dashboard, optionally filtered by product category (?path=).

    Reads the module-level datasets prepared at startup (df, df_con,
    words_data, map_data, sal_pri_data, sales_key, sales_data, shop_key,
    sorted_list, total) without mutating them.
    """
    T1 = time.time()

    path = request.args.get('path')
    # The two LDA pages are static templates.
    if path == '正面lda分析':
        return render_template('正面-lda.html')
    elif path == '负面lda分析':
        return render_template('负面-lda.html')

    # Work on per-request local views. The original declared
    # `global df, df_con` and reassigned them with the filtered frames, so
    # the module-level dataset permanently shrank after the first filtered
    # request. (It also fetched `path` twice; once is enough.)
    page_df = df
    page_df_con = df_con
    if path:
        # Select the rows for the requested category.
        page_df = df.loc[df['type'] == path]
        # NOTE(review): the comment categories are cross-mapped here
        # (冰箱/燃气灶 -> 热水器 comments, 洗衣机 -> 抽油烟机) — confirm this is
        # intentional and not a copy-paste slip.
        if path == '冰箱' or path == '燃气灶':
            page_df_con = df_con.loc[df_con['类别'] == '热水器']
        elif path == '洗衣机':
            page_df_con = df_con.loc[df_con['类别'] == '抽油烟机']
    positive_count, negative_count, neutral_count, data_price_interval = get_data(page_df, page_df_con, path)

    T2 = time.time()

    # Regression forecast, with up to +20% random jitter per point.
    ans = predict_linear()
    ans = [(num + random.random() * 0.2 * num) for num in ans]


    data = {
        # sentiment counts
        '积极': positive_count,
        '消极': negative_count,
        '中性': neutral_count,
        # word-cloud data
        'words_data': words_data,
        # map sales data
        'map_data': map_data,
        # sales vs. price relationship
        'sal_pri_data': sal_pri_data,
        # price-interval distribution
        'data_price_interval': data_price_interval,
        # sales distribution per product type
        'sales_key': sales_key,
        'sales_data': sales_data,
        # top shops by sales
        'shop_data': sorted_list,
        'shop_key': shop_key,
        # total sales
        'total': total,
        'num': len(page_df['sales']),
        'price': ans,
    }

    # Timing covers only data preparation (T1..T2), not the forecast above.
    print('程序运行时间:%s毫秒' % ((T2 - T1) * 1000))

    return render_template('index.html', data=data)


if __name__ == '__main__':
    # Load all frames from the database once at startup; these module-level
    # names are read as globals by the route handler.
    ans = query()
    df = ans[0]
    df2 = copy.deepcopy(df)  # get_sal_pri_data mutates its frame in place
    df_con = ans[1]
    df_addr = ans[2]

    # Pre-compute the chart datasets served by index().
    words_data = getWordCloud(df)          # word cloud
    map_data = getMapData(df_addr)         # province sales map
    sal_pri_data = get_sal_pri_data(df2)   # sales vs. price
    sales_key, sales_data = getSails(df)   # sales distribution per type
    shop_key, sorted_list = getTopShop(df) # top-10 shops

    # Overall sales total. NOTE(review): here '万' is stripped rather than
    # expanded to *10000 as in getSails/getTopShop — preserved as-is, but
    # confirm which behavior is intended.
    total = sum(int(s.replace('万', '').replace('+', ''))
                for s in df['sales'].tolist() if s)



    app.run(debug=True, port=8081)
