import streamlit as st
import pandas as pd
import hashlib
import os
import sqlite3
import subprocess
from wordcloud import WordCloud
from naive_bayes_sentiment_analyzer import SentimentAnalyzer

# TODO:
# The analysis could be split into two parts: hot-topic trends (overall
# word cloud of the posts, topic classification, hourly post-count
# distribution) and sentiment analysis (positive/negative post-count
# trends, overall sentiment-score trend, sentiment scores grouped by
# location and by gender).

# Module-level SQLite connection/cursor shared by the user-table helpers below.
# NOTE(review): sqlite3 objects are bound to their creating thread by default
# and Streamlit may rerun the script on different threads — confirm this is safe.
conn = sqlite3.connect('data.db')
c = conn.cursor()

# Resources for the pre-trained Naive Bayes sentiment model.
model_path = './data/bayes.pkl'
userdict_path = './data/userdict.txt'
stopword_path = './data/stopwords.txt'
# NOTE(review): doubled ".csv.csv" extension looks accidental — confirm the filename.
corpus_path = './data/weibo_senti_100k.csv.csv'

analyzer = SentimentAnalyzer(model_path=model_path, stopword_path=stopword_path, userdict_path=userdict_path)
# Stopword table (one word per line) used by the LDA topic extraction below.
stopwords = pd.read_csv(f"{os.path.dirname(__file__)}/data/stopwords.txt", index_col=False, quoting=3, sep="\t",
                        names=['stopword'], encoding='utf-8')

# HTTP headers used by the Weibo info crawler in preprocess().
# NOTE(review): the hard-coded cookie / XSRF token is a live credential
# committed to source and will expire — consider loading it from an
# environment variable or config file instead.
headers = {
    'authority': 'weibo.com',
    'x-requested-with': 'XMLHttpRequest',
    'sec-ch-ua-mobile': '?0',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/91.0.4472.124 Safari/537.36',
    'content-type': 'application/x-www-form-urlencoded',
    'accept': '*/*',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'https://weibo.com/1192329374/KnnG78Yf3?filter=hot&root_comment_id=0&type=comment',
    'accept-language': 'zh-CN,zh;q=0.9,en-CN;q=0.8,en;q=0.7,es-MX;q=0.6,es;q=0.5',
    'cookie': 'SCF=Ang1PsM9AoPOH6r2JMfsf_cG-DF5ce8cSUt-u4AhK1i5wQRp7tw8QyalBIidf_baiVUx4v-heMf80G-h4cl7hGc.; '
              'UOR=www.baidu.com,weibo.com,www.baidu.com; SINAGLOBAL=6060355668604.674.1709552585531; '
              'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWSJ_7WUJKD_-HWMMWH1YY_5JpX5KMhUgL.Fo'
              '-cS0ep1hnXe022dJLoIEBLxKqL1KnL1-qLxK-L1h-L1h.LxK-LB.-LB--LxKqLB-BLBK-t; ALF=1713856403; '
              'SUB=_2A25I-6LDDeRhGeNI7FEQ-CbIyD2IHXVoeLoLrDV8PUJbkNANLVH5kW1NSCMrRkVP6IVzNcrDusJ9Uj23rTYezxbI; '
              'WBPSESS'
              '=DZfzo8PBycpQXciQLyzIdlhjpu3GSrUjHdDzGdeTxn6_FTe0MIZZN5H_tmED0x6_DyEheg9EuSsRBmW52A16egKtc4BlG0wDJ9D9L'
              '-Jzu1cOYo4jD9dcF8Zl-2Vn_C-CGurdvBxTzka6DXiMgay_Cw==; '
              'ULV=1711264427939:3:3:1:6024224491230.599.1711264427927:1710233701739; '
              'XSRF-TOKEN=frxw5zcRw6Ufd47Z324aLgbR'
}


def make_hashes(password):
    """Hash a plaintext password with SHA-256 and return the hex digest."""
    digest = hashlib.sha256(str.encode(password))
    return digest.hexdigest()


def check_hashes(password, hashed_text):
    """Return *hashed_text* when *password* hashes to it, otherwise False."""
    return hashed_text if make_hashes(password) == hashed_text else False


def create_usertable():
    """Ensure the userstable (username, password) table exists."""
    ddl = 'CREATE TABLE IF NOT EXISTS userstable(username TEXT,password TEXT)'
    c.execute(ddl)


def add_userdata(username, password):
    """Insert one (username, password-hash) row and commit the transaction."""
    insert_sql = 'INSERT INTO userstable(username,password) VALUES (?,?)'
    c.execute(insert_sql, (username, password))
    conn.commit()


def login_user(username, password):
    """Return all rows matching the credentials (empty list when none match)."""
    query = 'SELECT * FROM userstable WHERE username =? AND password = ?'
    c.execute(query, (username, password))
    return c.fetchall()


def view_all_users():
    """Return every (username, password) row stored in userstable."""
    c.execute('SELECT * FROM userstable')
    return c.fetchall()


def set_up(keyword, start_date, end_date):
    """Rewrite KEYWORD_LIST / START_DATE / END_DATE in ./weibo/settings.py.

    Args:
        keyword: search keyword, written as a one-element KEYWORD_LIST.
        start_date: crawl start date string, e.g. '2023-04-01'.
        end_date: crawl end date string, e.g. '2023-04-30'.
    """
    settings_path = "./weibo/settings.py"

    # Read all lines first; the original re-opened the file for writing
    # while the read handle was still open, which is fragile (and fails
    # on some platforms/filesystems).
    with open(settings_path, "r", encoding="utf-8") as file:
        lines = file.readlines()

    # Replace the three configuration lines in place.  Matching with `in`
    # (not startswith) mirrors the original behavior: any line merely
    # mentioning one of these names is rewritten too.
    for i, line in enumerate(lines):
        if "KEYWORD_LIST" in line:
            lines[i] = f"KEYWORD_LIST = ['{keyword}']\n"
        if "START_DATE" in line:
            lines[i] = f"START_DATE = '{start_date}'\n"
        if "END_DATE" in line:
            lines[i] = f"END_DATE = '{end_date}'\n"

    # Write the modified file back only after the read handle is closed.
    with open(settings_path, "w", encoding="utf-8") as w_file:
        w_file.writelines(lines)


# Main page: crawl / analyze controls
def home_page():
    """Render the crawl-and-analyze control page.

    Widgets: keyword and date-range inputs plus five action buttons —
    configure (writes scrapy settings), crawl (starts scrapy), stop
    (terminates the crawl), fetch extra info, and analyze.
    """
    keyword = st.text_input('关键词')
    start_date = st.text_input('开始时间', placeholder='格式：2023-04-01')
    end_date = st.text_input('结束时间', placeholder='格式：2023-04-30')
    col1, col2, col3, col4, col5 = st.columns(5)
    with col1:
        btn1 = st.button('确定')
    with col2:
        btn2 = st.button('查询')
    with col3:
        btn3 = st.button('停止')
    with col4:
        btn4 = st.button('查询附加信息')
    with col5:
        btn5 = st.button('分析')
    if btn1:
        set_up(keyword, start_date, end_date)
        st.success('设置成功')
    if btn2:
        st.success('爬取中...请稍侯...')
        # Launch the scrapy crawler asynchronously.  The original called
        # process.communicate() here, which blocks until the whole crawl
        # finishes — freezing the UI and making the stop button useless —
        # and left PIPE buffers undrained (deadlock risk).  We discard the
        # child's output and just keep the handle so btn3 can terminate it.
        process = subprocess.Popen(['scrapy', 'crawl', 'search'],
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL)
        st.session_state['process'] = process

    if btn3:
        # Guard against pressing stop before any crawl was started
        # (the original raised KeyError in that case).
        if st.session_state.get('process') is not None:
            st.session_state['process'].terminate()
            st.success('爬取完成')
        else:
            st.warning('没有正在运行的爬取任务')
    if btn4:
        msg2 = st.success('正在查询附加信息...')
        preprocess(keyword)
        msg2.empty()
        st.success('查询附加信息完成')
    if btn5:
        msg3 = st.success('正在分析中')
        analyze(keyword)
        msg3.empty()
        st.success('分析完成')


def calculate_sentiment_score(df):
    """Add a 'sentiment_score' column to *df*, scored from the '微博正文' text.

    The scoring pass is run under cProfile (profile stats go to stdout).
    Returns the same DataFrame, mutated in place.
    """
    import cProfile
    import multiprocessing
    from pandarallel import pandarallel
    from SentimentCalculator import SentimentCalculator

    calculator = SentimentCalculator()
    # NOTE(review): pandarallel is initialized here but the scoring below
    # uses plain .apply(), not .parallel_apply() — the worker pool is never
    # used.  Confirm whether parallel_apply was intended.
    pandarallel.initialize(progress_bar=True, nb_workers=multiprocessing.cpu_count() - 2)  # global setting

    def apply_sentiment_score():
        # Score each post's text; mutates the outer df.
        df['sentiment_score'] = df['微博正文'].apply(calculator.sentiment_score)

    cProfile.runctx("apply_sentiment_score()", globals(), locals())
    return df


# Fetch extra per-user information (preprocessing step)
def preprocess(keyword):
    """Enrich the crawl CSV for *keyword* with IP location, gender and sentiment.

    Reads ./结果文件/<keyword>/<keyword>.csv, looks up each author's IP
    location and gender via WeiboInfoCrawler (through scraped proxies),
    adds a sentiment-score column, and rewrites the CSV in place.
    """
    from InfoCrawler import WeiboInfoCrawler
    path = './结果文件/' + keyword + '/' + keyword + '.csv'
    crawler = WeiboInfoCrawler(headers=headers, path=path)
    uids = crawler.get_uid()
    # NOTE(review): page_range=3 presumably limits how many proxy-list pages
    # are scraped — confirm against InfoCrawler.
    proxies = crawler.get_valid_proxies(page_range=3)
    ip_location_dict, gender_dict = crawler.get_iplocation_gender_dict(uids, proxies)
    raw_data = crawler.get_raw_data(path)
    # Map each post's author id to the looked-up location/gender
    # (unmatched ids become NaN).
    raw_data['IP_location'] = raw_data['user_id'].map(ip_location_dict)
    raw_data['gender'] = raw_data['user_id'].map(gender_dict)
    raw_data = calculate_sentiment_score(raw_data)
    raw_data.to_csv(f'./结果文件/{keyword}/{keyword}.csv', index=False, encoding='utf-8-sig')


# Convert short province names to their full official names
def get_province_full_name(province_short_names):
    """Map short Chinese province names to full official names.

    Names not found in the lookup table map to None.
    """
    full_name_by_short = {
        "北京": "北京市",
        "天津": "天津市",
        "上海": "上海市",
        "重庆": "重庆市",
        "河北": "河北省",
        "山西": "山西省",
        "辽宁": "辽宁省",
        "吉林": "吉林省",
        "黑龙江": "黑龙江省",
        "江苏": "江苏省",
        "浙江": "浙江省",
        "安徽": "安徽省",
        "福建": "福建省",
        "江西": "江西省",
        "山东": "山东省",
        "河南": "河南省",
        "湖北": "湖北省",
        "湖南": "湖南省",
        "广东": "广东省",
        "海南": "海南省",
        "四川": "四川省",
        "贵州": "贵州省",
        "云南": "云南省",
        "陕西": "陕西省",
        "甘肃": "甘肃省",
        "青海": "青海省",
        "台湾": "台湾省",
        "内蒙古": "内蒙古自治区",
        "广西": "广西壮族自治区",
        "西藏": "西藏自治区",
        "宁夏": "宁夏回族自治区",
        "新疆": "新疆维吾尔自治区",
        "香港": "香港特别行政区",
        "澳门": "澳门特别行政区"
    }
    return list(map(full_name_by_short.get, province_short_names))


# Map-based visualization of province-level sentiment
def map_visualization(df):
    """Render a China map of the mean (rounded) sentiment score per province.

    Expects *df* to carry 'sentiment_score' and 'IP_location' columns;
    rows with a missing location are grouped under '未知'.
    """
    from pyecharts import options as opts
    from pyecharts.charts import Map
    import streamlit.components.v1 as components
    provinces = [
        "北京", "天津", "上海", "重庆", "河北", "山西", "辽宁", "吉林", "黑龙江",
        "江苏", "浙江", "安徽", "福建", "江西", "山东", "河南", "湖北", "湖南", "广东",
        "海南", "四川", "贵州", "云南", "陕西", "甘肃", "青海", "台湾", "内蒙古", "广西",
        "西藏", "宁夏", "新疆", "香港", "澳门"
    ]
    df['rounded_sentiment_score'] = df['sentiment_score'].round()
    # Default missing locations to "未知"; column assignment instead of
    # fillna(inplace=True) avoids pandas chained-assignment pitfalls.
    df['IP_location'] = df['IP_location'].fillna('未知')
    # Mean rounded score per location.
    grouped_df = df.groupby('IP_location')['rounded_sentiment_score'].mean().reset_index()
    emotion_values = [0] * len(provinces)
    for i, province in enumerate(provinces):
        if province in grouped_df['IP_location'].values:
            emotion_values[i] = grouped_df[grouped_df['IP_location'] == province]['rounded_sentiment_score'].values[0]
    provinces = get_province_full_name(provinces)
    map_data = list(zip(provinces, emotion_values))
    # Piecewise color legend.  Fixes two defects in the original pieces:
    # the first bucket was labeled "-20-0" despite covering -50..0, and the
    # last bucket had min=2000 > max=1000 (an empty range), so values
    # >= 1000 were never colored.
    _map = (
        Map()
        .add('情感均值', map_data, "china")
        .set_global_opts(
            title_opts=opts.TitleOpts(title="中国情感均值地图"),
            visualmap_opts=opts.VisualMapOpts(is_piecewise=True,
                                              pieces=[{"max": 0, "min": -50, "label": "-50-0", "color": "#00FFFF"},
                                                      {"max": 50, "min": 0, "label": "0-50", "color": "#FF69B4"},
                                                      {"max": 100, "min": 50, "label": "50-100", "color": "#0000FF"},
                                                      {"max": 200, "min": 100, "label": "100-200", "color": "#00BFFF"},
                                                      {"max": 500, "min": 200, "label": "200-500", "color": "#228B22"},
                                                      {"max": 1000, "min": 500, "label": "500-1000",
                                                       "color": "#FF0000"},
                                                      {"min": 1000, "label": ">=1000",
                                                       "color": "#FFD700"}
                                                      ])
        )
    )
    china_emotion_map = _map.render_embed()  # render the chart to embeddable HTML
    components.html(china_emotion_map, height=600)


# Data analysis and visualization
def analyze(keyword):
    """Load the enriched crawl CSV for *keyword* and render all charts in Streamlit.

    Produces, in order: the raw data table, an hourly post-count line chart,
    an hourly mean sentiment line chart, a sentiment pie chart, a
    positive/negative-by-hour line chart, an overall word cloud, mean
    sentiment bar charts by location and by gender, and a province-level
    sentiment map.

    Expects the CSV to contain 发布时间, 微博正文, sentiment_score,
    IP_location and gender columns (as produced by preprocess()).
    """
    import matplotlib.pyplot as plt
    import pandas as pd

    path = f'./结果文件/{keyword}/{keyword}.csv'
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font able to render Chinese labels
    plt.rcParams['axes.unicode_minus'] = False
    data = pd.read_csv(path)
    st.write(f'总共爬取到{data.shape[0]}条数据')
    st.dataframe(data)

    # Extract the hour of day from each post's publish time.
    data['hour'] = pd.to_datetime(data['发布时间']).dt.hour
    # Count posts per hour of day.
    hour_counts = data.groupby('hour').size().reset_index(name='count')
    # Line chart: hourly post volume.
    fig1 = plt.figure(figsize=(12, 6))
    plt.plot(hour_counts['hour'], hour_counts['count'], marker='*')
    plt.xlabel('Hour')
    plt.ylabel('Count')
    plt.title('小时段发博量')
    plt.grid(True)
    plt.tight_layout()
    # Show the figure.
    st.pyplot(fig1)

    # Mean sentiment_score per hour.
    hourly_avg = data.groupby('hour')['sentiment_score'].mean().reset_index()
    # Line chart: hourly mean sentiment.
    fig2 = plt.figure(figsize=(12, 6))
    plt.plot(hourly_avg['hour'], hourly_avg['sentiment_score'], marker='o')
    plt.title('按小时统计的 sentiment_score 平均值')
    plt.xlabel('小时')
    plt.ylabel('情感评分平均值')
    plt.xticks(range(24))
    plt.grid(True)
    st.pyplot(fig2)

    # Classify each post with the module-level Naive Bayes analyzer.
    data['sentiment'] = data['微博正文'].apply(lambda x: analyzer.analyze(x))
    # Count positive vs negative posts.
    sentiment_count = data['sentiment'].value_counts()
    fig3 = plt.figure(figsize=(10, 8))
    # Pie chart of the sentiment distribution.
    ax3 = fig3.add_subplot(111)
    ax3.pie(sentiment_count.values.tolist(), labels=sentiment_count.index.tolist(), autopct='%1.1f%%', startangle=140)
    plt.title('博文情感分布饼图')
    st.pyplot(fig3)

    # Post counts grouped by hour and sentiment.
    # NOTE(review): assumes analyzer.analyze returns exactly the labels
    # 'positive' and 'negative' (used as column names below) — confirm.
    grouped_data = data.groupby(['hour', 'sentiment']).size().unstack(fill_value=0)

    # Plot positive vs negative volume per hour.
    fig4, ax = plt.subplots(figsize=(12, 6))

    ax.plot(grouped_data.index, grouped_data['positive'], label='Positive', color='g')
    ax.plot(grouped_data.index, grouped_data['negative'], label='Negative', color='r')

    # Chart cosmetics.
    ax.set_xlabel('Hour')
    ax.set_ylabel('Number of Tweets')
    ax.set_title('Number of Tweets by Hour and Sentiment')
    ax.legend()
    ax.grid(True, which='both', linestyle='--', linewidth=0.5)
    st.pyplot(fig4)

    # Per-post topic words via LDA (see extract_topic / lda below).
    data['分词'] = data['微博正文'].apply(lambda x: extract_topic(x))

    # # Split post texts by sentiment class
    # positive_texts = data[data['sentiment'] == 'positive']['分词'].apply(lambda x: ' '.join(x))
    # negative_texts = data[data['sentiment'] == 'negative']['分词'].apply(lambda x: ' '.join(x))
    # # Word cloud for positive posts
    # positive_wordcloud = WordCloud(background_color='white', width=800, height=400,
    #                                font_path='C:/Windows/Fonts/msyh.ttc').generate(
    #     ' '.join(positive_texts))
    #
    # # Word cloud for negative posts
    # negative_wordcloud = WordCloud(background_color='white', width=800, height=400,
    #                                font_path='C:/Windows/Fonts/msyh.ttc').generate(
    #     ' '.join(negative_texts))
    all_text = data['分词'].apply(lambda x: ' '.join(x))
    # NOTE(review): hard-coded Windows font path — breaks on other OSes.
    all_wordcloud = WordCloud(background_color='white', width=800, height=400,
                              font_path='C:/Windows/Fonts/msyh.ttc').generate(' '.join(all_text))
    # Draw the overall word cloud.
    fig5 = plt.figure(figsize=(10, 5))
    plt.imshow(all_wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.title('总体词云图')
    plt.tight_layout()
    st.pyplot(fig5)

    # Mean sentiment_score per IP location.
    location_avg = data.groupby('IP_location')['sentiment_score'].mean().reset_index()

    # Bar chart: mean sentiment by location.
    fig6 = plt.figure(figsize=(12, 6))
    plt.bar(location_avg['IP_location'], location_avg['sentiment_score'])
    plt.title('按地点统计的 sentiment_score 平均值')
    plt.xlabel('地点')
    plt.ylabel('sentiment_score 平均值')
    plt.xticks(rotation=45)
    st.pyplot(fig6)

    # Mean sentiment_score per gender.
    gender_avg = data.groupby('gender')['sentiment_score'].mean().reset_index()

    # Bar chart: mean sentiment by gender.
    fig7 = plt.figure(figsize=(6, 6))
    plt.bar(gender_avg['gender'], gender_avg['sentiment_score'])
    plt.title('按性别统计的 sentiment_score 平均值')
    plt.xlabel('性别')
    plt.ylabel('sentiment_score 平均值')
    plt.xticks(rotation=0)
    st.pyplot(fig7)

    # Province-level mean-sentiment map.
    map_visualization(data)


# LDA topic extraction
def lda(text, stopwords, num_topics):
    """Extract the *num_topics* highest-weighted LDA topic words from *text*.

    Args:
        text: raw document text (treated as a single document).
        stopwords: collection of words to drop after segmentation.
        num_topics: how many top-weighted words to return.

    Returns:
        List of topic words sorted by descending aggregated LDA weight.
    """
    import jieba
    import pandas as pd
    import gensim
    from gensim import corpora

    # Strip layout whitespace, segment with jieba, then drop one-character
    # tokens and stopwords.  (Original did this via filter + list + a
    # second list comprehension.)
    cleaned = text.replace("\n", "").replace(" ", "").replace("\t", "")
    tokens = [tok for tok in jieba.lcut(cleaned) if len(tok) > 1 and tok not in stopwords]
    sentences = [tokens]

    # Build the word -> integer-id dictionary and the bag-of-words corpus.
    dictionary = corpora.Dictionary(sentences)
    corpus = [dictionary.doc2bow(sentence) for sentence in sentences]
    # NOTE(review): the model is always trained with 20 topics; the
    # num_topics parameter only limits how many words are returned below —
    # confirm whether it was meant to control the model size too.
    lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)

    # Aggregate per-word weights across topics.  print_topics yields
    # (topic_id, '0.030*"word" + 0.020*"other" + ...') pairs.
    word_weights = {}
    for _, topic_repr in lda_model.print_topics():
        for term in topic_repr.split("+"):
            parts = [p.replace(" ", "").replace("\"", "") for p in term.split("*")]
            word_weights[parts[1]] = word_weights.get(parts[1], 0) + float(parts[0])
    word_weights = {word: float('%.3f' % weight) for word, weight in word_weights.items()}

    # Rank by aggregated weight and keep the top num_topics words.
    data_df = pd.DataFrame({'count': word_weights})
    data_df = data_df.reset_index().sort_values(by=["count"], ascending=False)
    return list(data_df[:num_topics]["index"].values)


def extract_topic(text, num_topics=5):
    """Return the top *num_topics* LDA topic words for *text*,
    using the module-level stopword table."""
    topic_words = lda(text, stopwords, num_topics)
    return topic_words


# System entry point
def main():
    """Top-level Streamlit page: Home / Login / SignUp navigation.

    Home is gated on st.session_state['login']; Login checks credentials
    against the SQLite userstable; SignUp inserts a new (username,
    SHA-256-hash) row.
    """
    st.title("微博舆情分析系统")
    menu = ["Home", "Login", "SignUp"]
    choice = st.sidebar.selectbox("Menu", menu)
    if choice == "Home":
        if 'login' in st.session_state:
            if st.session_state['login']:
                home_page()
        else:
            st.success('你必须登录才能访问。')
        # st.subheader("Home")

    elif choice == "Login":
        st.subheader("Login Section")
        username = st.sidebar.text_input("User Name")
        password = st.sidebar.text_input("Password", type='password')
        if st.sidebar.checkbox("Login"):
            create_usertable()
            # check_hashes(password, hash(password)) always equals the hash,
            # so this simply queries for the username + stored hash pair.
            hashed_pswd = make_hashes(password)
            result = login_user(username, check_hashes(password, hashed_pswd))
            if result:
                st.success("Logged In as {}".format(username))
                # NOTE(review): 'login' is set True here but never reset to
                # False anywhere visible — confirm how logout is handled.
                st.session_state["login"] = True
                task = st.selectbox("Task", ["Add Post", "Analytics", "Profiles"])
                if task == "Add Post":
                    st.subheader("Add Your Post")
                elif task == "Analytics":
                    st.subheader("Analytics")
                elif task == "Profiles":
                    st.subheader("User Profiles")
                    user_result = view_all_users()
                    clean_db = pd.DataFrame(user_result, columns=["Username", "Password"])
                    st.dataframe(clean_db)
            else:
                st.warning("Incorrect Username/Password")
    elif choice == "SignUp":
        st.subheader("Create New Account")
        new_user = st.text_input("Username")
        new_password = st.text_input("Password", type='password')
        if st.button("Signup"):
            create_usertable()
            # Passwords are stored as unsalted SHA-256 hashes (see make_hashes).
            add_userdata(new_user, make_hashes(new_password))
            st.success("You have successfully created a valid Account")
            st.info("Go to Login Menu to login")


if __name__ == '__main__':
    main()
