from master.mongo_operator import MongoHelper
import master.settings as settings
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from germansentiment import SentimentModel
from textblob_de import TextBlobDE


def export_to_xls(mongo_helper, collection, query, file_name):
    """Export matching tweets from the Twitter MongoDB to an Excel file.

    Args:
        mongo_helper: helper exposing read_mongo_to_dataframe(db, collection, query).
        collection: Mongo collection name to read from.
        query: Mongo filter dict passed through to the helper.
        file_name: output path for the Excel file (.xls/.xlsx).
    """
    df = mongo_helper.read_mongo_to_dataframe(settings.twitter_db, collection, query)
    # Drop the bulky raw-JSON columns before export. errors='ignore' avoids
    # the KeyError that `del df['tweet_json']` raised when a collection
    # happened to lack one of these columns.
    df = df.drop(columns=['tweet_json', 'user_json'], errors='ignore')
    df.to_excel(file_name)


# Input: dict mapping time keys (int-convertible) to counts.
def draw_trend_line(data):
    """Plot a per-hour count trend line with matplotlib.

    Args:
        data: dict whose keys and values are ints or int-convertible
              strings, e.g. {'0': 12, '1': 7, ...}.
    """
    # BUG FIX: sorted(data) on string keys orders '10' before '2', which
    # scrambled the x axis and made the line zigzag. Sort numerically.
    points = sorted((int(key), int(value)) for key, value in data.items())
    x = [p[0] for p in points]
    y = [p[1] for p in points]
    plt.plot(x, y, 'o-', color='r', label='Num per hour')
    # Previous title ("The Lasers in Three Conditions") was a leftover from
    # a matplotlib example and did not describe this chart.
    plt.title('Tweet count per hour')
    plt.xlabel('time')
    plt.ylabel('count')
    plt.legend()
    plt.show()


def get_all_account():
    """Placeholder: enumerating all tracked accounts is not implemented yet."""


def tweets_count_per_hour_one_day(df, date_str):
    """Placeholder: per-hour tweet counting for a single day is not implemented."""


def statistics(mongo_helper, twitter_db, twitter_collection, date_column_name, start_date, end_date, keywords):
    """Aggregate daily tweet statistics (volume, engagement, sentiment).

    For each day in [start_date, end_date], tweets whose `full_text`
    matches `keywords` are paged out of MongoDB in batches and their
    reply/retweet/quote/favorite counts summed; each tweet's text is
    classified with germansentiment's SentimentModel.

    Args:
        mongo_helper: helper exposing
            read_continuous_n_data_to_list(db, collection, skip, limit, query).
        twitter_db: MongoDB database name.
        twitter_collection: MongoDB collection name.
        date_column_name: document field holding the "YYYY-MM-DD" date string.
        start_date: inclusive range start, "YYYY-MM-DD".
        end_date: inclusive range end, "YYYY-MM-DD".
        keywords: substring matched (as a regex fragment) against full_text.

    Returns:
        list[dict]: one stats dict per day with keys 'date', 'count',
        'reply_count', 'retweet_count', 'quote_count', 'favorite_count',
        'positive', 'negative', 'neutral'. (BUG FIX: the accumulator was
        previously re-initialized every loop iteration, discarding all but
        the last day, and never returned; callers of the old version got
        None and ignored it, so returning the list is backward compatible.)
    """
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    model = SentimentModel()

    n = 5000  # batch size per Mongo read
    stat_list = []  # moved out of the loop: was reset every day, losing results
    while start <= end:
        date = start.strftime("%Y-%m-%d")
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "  正在计算 【" + date + "】 数据：")
        query = {date_column_name: date,
                 "full_text": {"$regex": '.*' + keywords + '.*'}}

        stat_data = {'date': date}
        count = 0
        reply_count = 0
        retweet_count = 0
        quote_count = 0
        favorite_count = 0
        senti_dict = {'positive': 0, 'negative': 0, 'neutral': 0}

        # Page through the day's matching tweets, n at a time, until a
        # short batch signals the end of the result set.
        first = 0  # paging offset, restarts for each day
        while True:
            df = pd.DataFrame(mongo_helper.read_continuous_n_data_to_list(
                twitter_db, twitter_collection, first, n, query))
            for row in df.itertuples():
                count += 1
                reply_count += int(getattr(row, 'reply_count'))
                retweet_count += int(getattr(row, 'retweet_count'))
                quote_count += int(getattr(row, 'quote_count'))
                favorite_count += int(getattr(row, 'favorite_count'))
                # predict_sentiment returns one label per input text
                sentiment = model.predict_sentiment([str(getattr(row, 'full_text'))])[0]
                senti_dict[sentiment] += 1
            if df.shape[0] < n:
                break
            first += n
            print('当前数据起点  ' + str(first))

        stat_data['count'] = count
        stat_data['reply_count'] = reply_count
        stat_data['retweet_count'] = retweet_count
        stat_data['quote_count'] = quote_count
        stat_data['favorite_count'] = favorite_count
        stat_data.update(senti_dict)

        print(stat_data)
        stat_list.append(stat_data)
        start = start + timedelta(days=1)

    return stat_list


# Count distinct (de-duplicated) filtered tweets per day in a date range.
def tweets_per_day(twitter_db, collection, date_column_name, start_date, end_date, keywords, mongo_helper=None):
    """Count de-duplicated tweets per day matching `keywords`.

    Args:
        twitter_db: MongoDB database name.
        collection: MongoDB collection name.
        date_column_name: document field holding the "YYYY-MM-DD" date string.
        start_date: inclusive range start, "YYYY-MM-DD".
        end_date: inclusive range end, "YYYY-MM-DD".
        keywords: substring matched (as a regex fragment) against full_text.
        mongo_helper: helper exposing column_distinct(db, coll, column, query).
            BUG FIX: the original read an undefined global `mongo_helper`
            (only created in the __main__ block), raising NameError when this
            module was imported. The parameter defaults to that module-level
            global for backward compatibility.

    Returns:
        dict: date string -> number of distinct tweet_id values (also printed).
    """
    print("正在统计 去重 后的每日推特数量..........")
    if mongo_helper is None:
        # Legacy behavior: fall back to the global created under __main__.
        mongo_helper = globals().get("mongo_helper")
    count_dict = {}
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    while start <= end:
        date = start.strftime("%Y-%m-%d")
        query = {date_column_name: date,
                 "full_text": {"$regex": '.*' + keywords + '.*'}}
        # Distinct tweet_id count de-duplicates retweet copies of the same tweet.
        count_dict[date] = len(mongo_helper.column_distinct(twitter_db, collection, "tweet_id", query))
        start += timedelta(days=1)
    print(count_dict)
    return count_dict


# Slow, but fairly accurate; yields only three classes: positive / neutral / negative.
def german_sentiment(text_list):
    """Classify German texts with germansentiment's SentimentModel.

    Args:
        text_list: list of German text strings.

    Returns:
        list of sentiment labels, one per input text.
    """
    return SentimentModel().predict_sentiment(text_list)


def TextBlobDE_sentiment(text):
    """Print and return the per-sentence sentiment polarity of a German text.

    Splits `text` into sentences via textblob-de and prints each sentence's
    polarity score.

    Args:
        text: German text; may contain multiple sentences.

    Returns:
        list[float]: polarity score per sentence, in order. (The original
        only printed and returned None; it also carried unused hard-coded
        sample texts that shadowed the real input — removed as dead code.)
    """
    blob = TextBlobDE(text)
    polarities = []
    for sent in blob.sentences:
        polarity = sent.sentiment.polarity
        print(polarity)
        polarities.append(polarity)
    return polarities


if __name__ == '__main__':
    # Ad-hoc driver: connect to MongoDB and export one collection's tweets
    # for a fixed date window to an Excel file. The other analyses below are
    # kept commented out and enabled by hand as needed.
    mongo_helper = MongoHelper(settings.mongodb_host, settings.mongodb_port)
    # collection = '2021-08-23_2021-08-29_laschet'
    # collection = "2021_Bündnis 90,Die Grünen,GRÜNE"
    collection = "2021_baerbock"
    # NOTE(review): window is [2021-08-30, 2021-09-01) — $lt excludes the
    # upper bound; confirm this is the intended two-day range.
    query = {"created_date": {"$gte": '2021-08-30', "$lt": '2021-09-01'}}
    export_to_xls(mongo_helper, collection, query, "9.xlsx")
    # german_sentiment()
    # tweets_per_day(settings.twitter_db, collection, "created_date", "2021-08-23", "2021-08-29", "")
    # statistics(mongo_helper, settings.twitter_db, collection, "created_date", "2021-08-23", "2021-08-29", "")
    #
    # texts = ["Jüngere Kandidat*innen heißt mehr Kämpfer*innen für Generationengerechtigkeit bei Klimaschutz, Renten und Investitionen, Und mit Grün gibt’s natürlich die meisten dafür!",
    #          "Mit keinem guten Ergebniss.", "Das ist gar nicht mal so gut.",
    #          "Total awesome!", "nicht so schlecht wie erwartet.",
    #          "Der Test verlief positiv.", "Sie fährt ein grünes Auto."]
    # TextBlobDE_sentiment(" ".join(texts))








# https://twitter.com/i/api/2/search/adaptive.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&q=baerbock%20since%3A2021-08-31%20until%3A2021-09-01&tweet_search_mode=live&count=20&query_source=typed_query&pc=1&spelling_corrections=1&ext=mediaStats%2ChighlightedLabel%2CvoiceInfo


# scroll%3AthGAVUV0VFVBaCwLCxisqg4icWgsC4_aaiw-InEnEVnLt4FYCJehgEVVNFUjUBFYIBFQAA
# scroll%3AthGAVUV0VFVBaCwLCxisqg4icWgsC4_aaiw-InEnEVsLt4FYCJehgEVVNFUjUBFYIBFQAA

