import os
from math import floor
import openpyxl
from mongo_operator import MongoHelper
import settings as settings
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from germansentiment import SentimentModel


# from textblob_de import TextBlobDE  {"gte":"00:00:00","lte":"00:19:59"}


def write_excel_xlsx(path, sheet_name, value):
    """Write a 2-D list *value* into a new single-sheet workbook at *path*.

    Args:
        path: destination file path; missing parent directories are created.
        sheet_name: title given to the workbook's active sheet.
        value: list of rows, each row a list of cell values (value[0] = row 1).
    """
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = sheet_name
    # openpyxl cells are 1-indexed, hence the +1 offsets.
    for i, row in enumerate(value):
        for j, cell_value in enumerate(row):
            sheet.cell(row=i + 1, column=j + 1, value=cell_value)
    # os.makedirs("") raises FileNotFoundError, so only create directories
    # when *path* actually contains a directory component.
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    workbook.save(path)


def export_to_xls(mongo_helper, target_db, collection, query, file_name):
    """Dump the result of a Mongo query to an Excel file.

    Reads matching documents into a DataFrame, prints its shape, removes
    the raw 'tweet_json' column, and writes the rest to *file_name*.
    """
    frame = mongo_helper.read_mongo_to_dataframe(target_db, collection, query)
    print(frame.shape)
    del frame['tweet_json']
    # del frame['user_json']
    frame.to_excel(file_name)


# input: dict mapping hour (string/int) -> count
def draw_trend_line(data):
    """Plot the per-hour counts in *data* as a red line chart and show it."""
    ordered_keys = sorted(data)
    hours = [int(k) for k in ordered_keys]
    counts = [int(data[k]) for k in ordered_keys]
    plt.plot(hours, counts, 'o-', color='r', label='Num per hour')
    plt.title('The Lasers in Three Conditions')
    plt.xlabel('time')
    plt.ylabel('count')
    plt.legend()
    plt.show()


def get_all_account():
    """Unimplemented stub — presumably intended to fetch all tracked accounts (TODO)."""
    pass


# def tweets_count_per_hour_one_day(twitter_db, twitter_collection, date_column_name, date_str):
#     print("正在统计 去重 后的单日推特分布..........")
#     count_dict = []
#     query = {date_column_name: date_str}
#
#     first = 0
#     n = 2000
#     # 对每天的数据循环，每次取出n个数据进行统计
#     for i in range(145):
#         count_dict.append(0)
#     while True:
#         df = pd.DataFrame(mongo_helper.read_continuous_n_data_to_list(twitter_db, twitter_collection, first, n, query))
#         # 分别统计四个参数
#         for row in df.itertuples():
#             pub_time = str(getattr(row, 'created_time'))
#             pub_datetime = datetime.strptime(date_str + " " + pub_time, "%Y-%m-%d %H:%M:%S")
#             minute = (pub_datetime - datetime.strptime(date_str + " 00:00:00", "%Y-%m-%d %H:%M:%S")).seconds/1200
#             count_dict[floor(minute)] += 1
#         if df.shape[0] < n:
#             break
#         else:
#             first += n
#             print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '已完成  ' + str(first))
#     for i in range(len(count_dict)):
#         print(count_dict[i])
# print(count_dict)


def statistics(mongo_helper, db_collection_list, date_column_name, start_date, end_date, keywords):
    """Compute per-day engagement and sentiment statistics for German tweets.

    For each {"db": ..., "collection": ...} entry in *db_collection_list*,
    walks every date from *start_date* to *end_date* (inclusive, both
    "%Y-%m-%d" strings), selects documents whose *date_column_name* equals
    the date and whose full_text matches *keywords* as a regex substring,
    and — only for documents with lang == "de" — accumulates tweet, reply,
    retweet, quote and favorite counts plus a positive/negative/neutral
    tally from the germansentiment model.  The resulting table is written,
    transposed, to ./de_results/<MMDD-MMDD>_<collection>.xls.

    NOTE(review): the file is saved by openpyxl (write_excel_xlsx), which
    produces xlsx content even though the name ends in .xls — confirm
    downstream readers accept that.
    """
    model = SentimentModel()
    for db_collection in db_collection_list:
        print("当前数据库：【" + db_collection["db"] + "】 当前数据集：【" + db_collection['collection'] + "】")
        start = datetime.strptime(start_date, "%Y-%m-%d")
        end = datetime.strptime(end_date, "%Y-%m-%d")
        stat_list = []
        while True:
            first = 0
            n = 2000
            # Continue while the current date is <= the end date; otherwise stop.
            if start <= end:
                stat_data = {}
                date = start.strftime("%Y-%m-%d")
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "  正在计算 【" + date + "】 数据：")
                stat_data['date'] = date
                query = {date_column_name: date}
                query["full_text"] = {"$regex": '.*' + keywords + '.*'}
                # print(query)
                # Fetch the filtered data in pages of n documents.
                count = 0
                reply_count = 0
                retweet_count = 0
                quote_count = 0
                favorite_count = 0
                senti_dict = {'positive': 0, 'negative': 0, 'neutral': 0}
                # Loop over the day's data, taking out n documents per iteration.
                # print("未去重！！！！！！！！")
                while True:
                    df = pd.DataFrame(mongo_helper.read_continuous_n_data_to_list(db_collection["db"], db_collection['collection'], first, n, query))
                    # Accumulate the four engagement counters row by row.
                    for row in df.itertuples():
                        # Only tweets whose language is German are counted.
                        if "de" == str(getattr(row, 'lang')):
                            count += 1
                            reply_count += int(getattr(row, 'reply_count'))
                            retweet_count += int(getattr(row, 'retweet_count'))
                            quote_count += int(getattr(row, 'quote_count'))
                            favorite_count += int(getattr(row, 'favorite_count'))
                            sentiment = model.predict_sentiment([str(getattr(row, 'full_text'))])[0]
                            senti_dict[sentiment] += 1
                    stat_data['tweet_count'] = count
                    stat_data['reply_count'] = reply_count
                    stat_data['retweet_count'] = retweet_count
                    stat_data['quote_count'] = quote_count
                    stat_data['favorite_count'] = favorite_count
                    # Percentages are recomputed after every page; the values
                    # from the final page are the ones that persist.
                    for item in senti_dict.keys():
                        stat_data[item] = senti_dict[item]
                        if int(stat_data['tweet_count']) > 0:
                            stat_data[item + "_percent"] = str(format(int(senti_dict[item]) * 100 / int(stat_data['tweet_count']), '.2f')) + '%'
                        else:
                            stat_data[item + "_percent"] = "0%"
                    if df.shape[0] < n:
                        break
                    else:
                        first += n
                        # print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '已完成  ' + str(first))
                print(stat_data)
                # First appended row is the header (the stat keys), added once.
                if len(stat_list) == 0:
                    stat_list.append([key for key in stat_data.keys()])
                stat_list.append([stat_data[key] for key in stat_data.keys()])
                start = start + timedelta(days=1)
            else:
                break
        # print(stat_list)
        if len(stat_list) > 0:
            date_range = datetime.strptime(start_date, "%Y-%m-%d").strftime("%m%d") + "-" + end.strftime("%m%d")
            # The nested comprehension transposes stat_list so each date
            # becomes one column in the sheet.
            write_excel_xlsx("./de_results/" + date_range + "_" + db_collection['collection'] + ".xls", "1", [[row[col] for row in stat_list] for col in range(len(stat_list[0]))])


# Count filtered daily tweet totals within the given date range.
def tweets_per_day(twitter_db, collection, start_date, end_date, keywords):
    """Print a {date: tweet_count} mapping for every day in the range.

    Args:
        twitter_db: name of the Mongo database to query.
        collection: name of the collection inside *twitter_db*.
        start_date: inclusive start, "%Y-%m-%d" string.
        end_date: inclusive end, "%Y-%m-%d" string.
        keywords: currently unused — the full_text regex filter is
            commented out below; kept for interface compatibility.

    NOTE(review): relies on the module-level *mongo_helper* created in
    __main__ rather than receiving the helper as a parameter.
    """
    print("正在统计 去重 后的每日推特数量..........")
    count_dict = {}
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    # One count query per day; the unused paging locals (first, n) from the
    # original draft have been removed.
    while start <= end:
        date = start.strftime("%Y-%m-%d")
        query = {"created_date": date}
        # query["full_text"] = {"$regex": '.*' + keywords + '.*'}
        count_dict[date] = mongo_helper.execute_count_query(twitter_db, collection, query)
        start = start + timedelta(days=1)
    print(count_dict)


# Count the intraday distribution of tweets per fixed time slot for each day.
def tweets_count_per_hour_one_day(twitter_db, collection, date_column_name, start_date, end_date, keywords):
    """Tabulate tweet counts per *span*-second time slot for each day in range.

    For every date from *start_date* to *end_date* (inclusive, "%Y-%m-%d"
    strings), issues one count query per time window on created_time and
    appends the row [date, count0, count1, ...] to the table, which is
    written (transposed) to ./1.xls via write_excel_xlsx.

    NOTE(review): relies on the module-level *mongo_helper* created in
    __main__; *keywords* is accepted but never used.
    """
    print("正在统计每日推特的日均分布数量..........")
    tweet_count_list = []
    span = 1200
    span_count = 6  # NOTE(review): int(3600*24/1200) == 72; the hard-coded 6 truncates each day to its first 6 slots — confirm intent.
    # Header row: slot indices 0..span_count.
    tweet_count_list.append([i for i in range(span_count + 1)])
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    while True:
        if start <= end:
            date = start.strftime("%Y-%m-%d")
            next_time = start
            day_count = [date]
            while True:
                time_start = next_time.strftime("%H:%M:%S")
                next_time = next_time + timedelta(seconds=span)
                # Clamp the window end when it would spill past midnight.
                if next_time.day > start.day:
                    time_end = "23:59:59"
                else:
                    time_end = (next_time + timedelta(seconds=-1)).strftime("%H:%M:%S")
                # print(time_start)
                query = {date_column_name: date, "created_time": {"$gte": time_start, "$lte": time_end}}
                tweet_count = mongo_helper.execute_count_query(twitter_db, collection, query)
                day_count.append(tweet_count)
                if "23:59:59" == time_end:
                    break
                if len(day_count) > span_count:
                    break
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '已完成  ' + date)
            start = start + timedelta(days=1)
            tweet_count_list.append(day_count)
        else:
            break
    file_name = "1.xls"
    print(tweet_count_list)
    # Transpose so each day becomes one column in the sheet.
    write_excel_xlsx("./" + file_name, "1", [[row[col] for row in tweet_count_list] for col in range(len(tweet_count_list[0]))])
    print("已保存到" + file_name)


# Slow; yields only the three classes positive/neutral/negative, but fairly accurate.
def german_sentiment(text_list):
    """Classify each text in *text_list* with the germansentiment model and return the labels."""
    sentiment_model = SentimentModel()
    return sentiment_model.predict_sentiment(text_list)


# def TextBlobDE_sentiment(text):
#     texts = ["Jüngere Kandidat*innen heißt mehr Kämpfer*innen für Generationengerechtigkeit bei Klimaschutz, Renten und Investitionen, Und mit Grün gibt’s natürlich die meisten dafür!",
#              "Mit keinem guten Ergebniss.", "Das ist gar nicht mal so gut.",
#              "Total awesome!", "nicht so schlecht wie erwartet.",
#              "Der Test verlief positiv.", "Sie fährt ein grünes Auto."]
#     str_txt = " ".join(texts)
#     blob = TextBlobDE(text)
#     for sent in blob.sentences:
#         a = sent.sentiment
#         print(sent.sentiment.polarity)


if __name__ == '__main__':
    # Script entry point: build the shared Mongo helper which tweets_per_day
    # and tweets_count_per_hour_one_day read as a module-level global.
    mongo_helper = MongoHelper(settings.mongodb_host, settings.mongodb_port)
    # collection = '2021-08-23_2021-08-29_laschet'
    # collection = "2021_Bündnis 90,Die Grünen,GRÜNE"

    # NOTE(review): *s* is never used — it looks like a scratch pad for the
    # db_collection_list literal below; confirm before removing.
    s = """
    {"db": election_db, "collection": "2021_laschet"}
    ,{"db": election_db, "collection": "2021_scholz"}
                           , {"db": election_db, "collection": "2021_baerbock"}
                           , {"db": election_db, "collection": "2021_cdu"}
                           , {"db": election_db, "collection": "2021_spd"}
                           ,{"db": election_db, "collection": "2021_fdp"}
                           , {"db": election_db, "collection": "2021_afd"}
                           ,   {"db": election_db, "collection": "2021_die linke"}
                           , {"db": election_db, "collection": "2021_die grünen"}
                           
    """
    twitter_db = "twitter_db"
    election_db = "de_election"
    election_history_db = "de_election_history"
    collection = "2021_05_china,chinese"
    # db_collection_list = [{"db": election_db, "collection": "2021_afd"}]
    # NOTE(review): db_collection_list is only consumed by the statistics()
    # call below, which is commented out in this run.
    db_collection_list = [{"db": election_db, "collection": "2021_afd"}
        , {"db": election_db, "collection": "2021_die linke"}
        , {"db": election_db, "collection": "2021_die grünen"}
                          ]
    # query = {"created_date": {"$gte": '2021-09-01', "$lte": '2021-09-12'}}
    # query = {"created_time": {"$gte": '00:00:00', "$lt": '00:05:00'}}
    # {'type': 'SEARCH', 'db': 'twitter_db', 'collection': '2021_china,chinese', 'user_info': '2021_china,chinese_userinfo', 'url': 'https://twitter.com/i/api/2/search/adaptive.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&q=%28china%20OR%20chinese%29%20since%3A2021-05-22%20until%3A2021-05-23&tweet_search_mode=live&count=20&query_source=typed_query&pc=1&spelling_corrections=1&ext=mediaStats%2ChighlightedLabel%2CvoiceInfo', 'referer': 'https://twitter.com/search?q=%28china%20OR%20chinese%29%20since%3A2021-05-22%20until%3A2021-05-23&src=typed_query&f=live', 'start_date': '1000-01-01'}

    # ********************************************************************

    # export_to_xls(mongo_helper, election_db, collection, query, "92.xlsx")
    # txt = ['Politik: Von Reichen das Geld, von Armen die Stimmen, beides unter dem Vorwand, die einen vor den anderen zu schützen. #cdu #spd #fdp #grüne']
    # print(german_sentiment(txt))

    # print(sorted(mongo_helper.column_distinct(settings.twitter_db, collection, "created_date", query)))
    # tweets_per_day(twitter_db, collection, "2021-08-01", "2021-08-31", "")
    # Active task for this run: per-slot daily tweet distribution for May 2021.
    tweets_count_per_hour_one_day(twitter_db, collection, "created_date", "2021-05-01", "2021-05-31", "")

    # statistics(mongo_helper, db_collection_list, "created_date", "2017-07-12", "2017-07-21", "")


# 2021_08_china,chinese 08-11,
# 2021_07_china,chinese 07-02~~~~07-13,0719,0726,0728