#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2021/10/30 15:09
# @Author  : CHEN Wang
# @Site    : 
# @File    : alternative_data_update.py
# @Software: PyCharm

"""
脚本说明: 计算更新另类数据，如舆情数据
"""

import os
import time
import numpy as np
import pandas as pd
from retrying import retry
from pytrends.request import TrendReq
from quant_researcher.quant.project_tool.localize import DATA_DIR
from quant_researcher.quant.project_tool.time_tool import get_today, date_shifter, calc_date_diff
from quant_researcher.quant.datasource_fetch.crypto_api.crypto_news import get_aicoin_bulletin, get_jinse_bulletin, get_jinse_headlines
from quant_researcher.quant.datasource_fetch.crypto_api.sanbase.sanbase import get_social_metrics_list, get_indicators_via_http
from logger import logger
import os
import traceback
from task_monitor import task_to_db, send_error_to_email

# Full list of Sanbase social metric names available for bitcoin.
# NOTE: this performs a network request at module import time; importing this
# module therefore requires Sanbase connectivity.
social_metrics = get_social_metrics_list(asset='bitcoin')


@retry(stop_max_attempt_number=3)
def update_social_metrics(asset='bitcoin', metric=None, start_date=None, end_date=None):
    """
    Daily update of one social-sentiment metric, persisted as CSV.

    Fetches ``metric`` for ``asset`` from the Sanbase HTTP API and merges it
    into ``<DATA_DIR>/social_data/<metric>.csv``. The row for ``end_date``
    (if present) is dropped because the latest day's data is still incomplete.
    Retries up to 3 times on any exception (``@retry``).

    :param str asset: asset slug, e.g. 'bitcoin'
    :param str metric: metric name; also used as the CSV column and file name
    :param str start_date: inclusive start 'YYYY-MM-DD'; None lets the API decide
    :param str end_date: inclusive end 'YYYY-MM-DD'; defaults to today
    :return: None -- the result is written to disk
    """

    if end_date is None:
        end_date = get_today(marker='with_n_dash')  # data is complete only up to yesterday's close

    file_path = os.path.join(DATA_DIR, 'social_data')
    os.makedirs(file_path, exist_ok=True)

    recent_df = get_indicators_via_http(indic_name=metric, asset=asset, start_date=start_date, end_date=end_date)
    if end_date in recent_df.index:
        recent_df.drop(index=end_date, inplace=True)  # the newest day is still accumulating; drop it
    file_name = os.path.join(file_path, metric)
    if os.path.exists(f'{file_name}.csv'):
        origin_factor_df = pd.read_csv(f'{file_name}.csv', index_col='end_date')
        # Replace the (possibly revised) last stored row with fresh API data:
        # keep history up to but excluding the last stored date, then append
        # the API rows from that date onward -- no overlap, no duplicates.
        all_df = pd.concat([origin_factor_df.iloc[:-1, ], recent_df.loc[origin_factor_df.index[-1]:, ]])
        all_df.sort_index(inplace=True)
        all_df.to_csv(f'{file_name}.csv')
    else:
        # First download: strip the leading run of zero values, unless the
        # whole series is zeros (then keep it unchanged).
        nonzero = recent_df[recent_df[metric] != 0]
        if not nonzero.empty:
            recent_df = recent_df.loc[nonzero.index[0]:, :]
        recent_df.sort_index(inplace=True)
        recent_df.to_csv(f'{file_name}.csv')


def get_relative_sentiment_data(start_date=None, end_date=None):
    """
    Compute per-channel relative sentiment and write it to CSV.

    For each channel ``c`` in twitter/telegram/reddit/bitcointalk, reads
    ``sentiment_positive_<c>.csv`` and ``sentiment_negative_<c>.csv`` from
    ``<DATA_DIR>/social_data`` and writes
    ``relative_sentiment_<c> = (pos - neg) / (pos + neg)`` to
    ``relative_sentiment_<c>.csv``. Days where both counts are zero produce
    NaN and are filled with 0 (neutral).

    :param str start_date: inclusive slice start 'YYYY-MM-DD', or None for all
    :param str end_date: inclusive slice end 'YYYY-MM-DD', or None for all
    :return: None -- results are written to disk
    """
    file_path = os.path.join(DATA_DIR, 'social_data')
    os.makedirs(file_path, exist_ok=True)

    # relative_sentiment_twitter, relative_sentiment_telegram,
    # relative_sentiment_reddit, relative_sentiment_bitcointalk
    for channel in ['twitter', 'telegram', 'reddit', 'bitcointalk']:
        pos_name = f'sentiment_positive_{channel}'
        neg_name = f'sentiment_negative_{channel}'
        pos_path = os.path.join(file_path, pos_name)
        positive = pd.read_csv(f'{pos_path}.csv', index_col='end_date').loc[start_date:end_date, pos_name]
        neg_path = os.path.join(file_path, neg_name)
        negative = pd.read_csv(f'{neg_path}.csv', index_col='end_date').loc[start_date:end_date, neg_name]

        relative_sentiment = (positive - negative) / (positive + negative)
        relative_sentiment.name = f'relative_sentiment_{channel}'
        relative_sentiment.fillna(0, inplace=True)  # 0/0 days -> neutral sentiment
        out_path = os.path.join(file_path, f'relative_sentiment_{channel}')
        relative_sentiment.to_csv(f'{out_path}.csv')


def get_google_trends(keywords=None):
    """
    Fetch weekly Google Trends interest for *keywords* and analyse how search
    volume has drawn down from / recovered towards historical extremes.

    Walks forward week by week from the last analysed date (or 2017-11-05 on
    first run), each time querying a rolling 5-year window, and records:

    * ``Drawdown`` -- fall of the latest weekly value from the 1-year high
      (pre-2018-12-02 and 2021-01-03..2022-07-31 regimes);
    * ``Recover`` -- rise of the latest weekly value from the 1-year low
      (all other dates).

    Results are appended to ``google_trends_<keywords>.xlsx`` and also merged
    with BTC log prices into a ``*_log_prices.xlsx`` companion file.

    :param list keywords: search terms, or Google topic ids ('/m/...');
        defaults to ['cryptocurrency']
    :return: None -- results are written to Excel files
    """

    if keywords is None:  # avoid the mutable-default-argument pitfall
        keywords = ['cryptocurrency']

    file_path = os.path.join(DATA_DIR, 'social_data')
    if any('/m/' in kw for kw in keywords):
        # Topic ids look like '/m/0vpj4_b'; use the trailing token in file names.
        topics = [kw.split('/')[-1] for kw in keywords]
        file_name = os.path.join(file_path, f'google_trends_{topics}')
        logprice_file_name = os.path.join(file_path, f'google_trends_{topics}_log_prices')
    else:
        file_name = os.path.join(file_path, f'google_trends_{keywords}')
        logprice_file_name = os.path.join(file_path, f'google_trends_{keywords}_log_prices')

    if os.path.exists(f'{file_name}.xlsx'):
        history_df = pd.read_excel(f'{file_name}.xlsx')
        # Resume one day after the last analysed date.
        analysis_enddate = date_shifter(before=history_df.iloc[-1, 0], step='days', how_many=1)
    else:
        analysis_enddate = '2017-11-05'
    analysis_startdate = date_shifter(before=analysis_enddate, step='years', how_many=-5)
    today = get_today(marker='with_n_dash')

    pytrends = TrendReq(hl='en-US', tz=0, timeout=(60, 60), retries=15, backoff_factor=0.1)
    maxdrawdown = pd.Series(dtype=int)
    recover = pd.Series(dtype=int)
    while calc_date_diff(analysis_enddate, today) > 7:
        print(f'开始获取{analysis_enddate}时的搜索量数据')
        pytrends.build_payload(keywords, cat=0, timeframe=f'{analysis_startdate} {analysis_enddate}', geo='', gprop='')
        # Touch the related endpoints as well (results intentionally unused).
        pytrends.related_queries()
        pytrends.related_topics()

        data = pytrends.interest_over_time()  # weekly frequency by default
        record_date = date_shifter(before=analysis_enddate, step='days', how_many=6)

        temp = data.iloc[-52:, 0]  # most recent year of weekly interest
        latest = temp.iloc[-1]  # positional access; temp[-1] relied on deprecated label fallback
        if (analysis_enddate < '2018-12-02') or ('2021-01-03' <= analysis_enddate <= '2022-07-31'):
            # Regimes where we analyse the drawdown of search interest.
            if max(temp) < 30:  # interest too low all year -- nothing to analyse
                maxdrawdown[record_date] = 0
            else:
                maxdrawdown[record_date] = (latest - max(temp)) / max(temp)
            recover[record_date] = 0
        else:
            # Regimes where we analyse the recovery of search interest.
            if latest > 15:  # absolute interest recovered a bit; compare with the 1-year low
                recover[record_date] = (latest - min(temp)) / min(temp)
            else:
                recover[record_date] = 0
            maxdrawdown[record_date] = 0

        analysis_enddate = date_shifter(before=analysis_enddate, step='weeks', how_many=1)
        analysis_startdate = date_shifter(before=analysis_enddate, step='years', how_many=-5)
        time.sleep(5)  # be polite to the Google Trends endpoint

    maxdrawdown.name = 'Drawdown'
    maxdrawdown.index.name = 'date'
    recover.name = 'Recover'
    recover.index.name = 'date'
    new_df = pd.concat([maxdrawdown, recover], axis=1)
    new_df = new_df.reset_index()
    if os.path.exists(f'{file_name}.xlsx'):
        all_df = pd.concat([history_df, new_df], axis=0)
        all_df.drop_duplicates(subset=['date'], inplace=True)
    else:
        all_df = new_df
    all_df.to_excel(f'{file_name}.xlsx', index=False)

    # Merge the analysis with BTC log prices for inspection/plotting.
    ohlcv_path = os.path.join(DATA_DIR, 'BTC_history_ohlcvm')
    ohlcv_data = pd.read_excel(f'{ohlcv_path}.xlsx', index_col='end_date')
    # .copy() avoids SettingWithCopyWarning when adding 'log_price' below.
    prices_df = ohlcv_data[['close']].copy()
    prices_df['log_price'] = np.log10(prices_df['close'])
    prices_df = prices_df.reset_index()

    all_df_log_prices = all_df.merge(prices_df, left_on='date', right_on='end_date', how='left')
    all_df_log_prices.to_excel(f'{logprice_file_name}.xlsx', index=False)

def get_jinse_crypto_news():
    """
    Download Jinse Finance bulletins and headlines and persist them.

    Raw feeds are written to CSV; cleaned subsets with Chinese column headers
    are written to Excel (bulletins split into one file per year, headlines
    into a single file) under ``<DATA_DIR>/social_data``.

    :return: None -- results are written to disk
    """
    file_path = os.path.join(DATA_DIR, 'social_data')

    # Jinse bulletin (flash news) feed.
    print('获取jinse_bulletin')
    all_news_df = get_jinse_bulletin(update=True)
    file_name = os.path.join(file_path, 'all_jinse_bulletin_raw')
    all_news_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)

    # .copy(): column selection returns a slice; copying avoids
    # SettingWithCopyWarning when the 'year' column is added below.
    news_df = all_news_df[['date', 'time', 'datetime', 'content_prefix', 'content', 'up_counts', 'down_counts']].copy()
    news_df['year'] = news_df['date'].str[:4]
    for year, year_df in news_df.groupby(by='year'):
        file_name = os.path.join(file_path, f'all_jinse_bulletin_{year}')
        year_df.columns = ['日期', '时间', '时间日期', '标题', '新闻内容', '看涨', '看空', '年份']
        year_df.to_excel(f'{file_name}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)

    # Jinse headline articles.
    print('获取jinse_headlines')
    all_news_df = get_jinse_headlines(update=True)
    file_name = os.path.join(file_path, 'all_jinse_headlines_raw')
    all_news_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)

    news_df = all_news_df[['datetime', 'time', 'title', 'summary', 'content', 'show_read_number', 'up_counts', 'down_counts', 'jump_url']].copy()
    file_name = os.path.join(file_path, 'all_jinse_headlines')
    news_df.columns = ['日期', '时间', '标题', '文章摘要', '快讯内容', '文章阅读数', '快讯看多数', '快讯看空数', '跳转链接']
    news_df.to_excel(f'{file_name}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)


def get_aicoin_crypto_news():
    """
    Pull the AiCoin bulletin feed and persist it.

    Writes the raw feed to CSV, then saves an id-ordered subset with Chinese
    column headers to Excel, both under ``<DATA_DIR>/social_data``.

    :return: None -- results are written to disk
    """
    out_dir = os.path.join(DATA_DIR, 'social_data')

    # AiCoin flash-news bulletins.
    print('获取aicoin_bulletin')
    bulletin_df = get_aicoin_bulletin(update=True)
    raw_path = os.path.join(out_dir, 'all_aicoin_bulletin_raw')
    bulletin_df.to_csv(f'{raw_path}.csv', encoding='utf_8_sig', index=False)

    # Order chronologically by feed id, keep the display columns, and rename
    # the headers to Chinese for the Excel report.
    bulletin_df.sort_values(by='id', ascending=True, inplace=True)
    selected = bulletin_df[['date', 'time', 'datetime', 'title', 'content']]
    xlsx_path = os.path.join(out_dir, 'all_aicoin_bulletin')
    selected.columns = ['日期', '时间', '时间日期', '标题', '新闻内容']
    selected.to_excel(f'{xlsx_path}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)


def _run_monitored(func_name, task, *args, **kwargs):
    """
    Execute *task* with the task-monitor bookkeeping shared by every job below.

    Logs start/end, registers the task in the monitor DB (marking status 1 on
    success), and on any exception logs the traceback and emails it rather
    than aborting the script, so the remaining jobs still run.

    :param str func_name: task name recorded in the monitor DB and email
    :param callable task: the work to run; extra args are passed through
    """
    script_name = os.path.basename(__file__)
    logger.info(f"开始{func_name}")
    task_to_db(script_name, func_name)
    try:
        task(*args, **kwargs)
        task_to_db(script_name, func_name, 1)
    except Exception:
        msg = traceback.format_exc()
        logger.info(msg)
        send_error_to_email(script_name=script_name, func_name=func_name, message=msg)
    logger.info(f"结束{func_name}")


def _update_all_social_metrics():
    """Refresh every Sanbase social metric for bitcoin since 2015-01-01."""
    for metric in social_metrics:
        update_social_metrics(asset='bitcoin', metric=metric, start_date='2015-01-01', end_date=None)
        time.sleep(2)  # pause 2s between requests so Sanbase does not flag us


if __name__ == '__main__':
    _run_monitored('update_social_metrics', _update_all_social_metrics)
    _run_monitored('get_relative_sentiment_data', get_relative_sentiment_data, start_date='2015-01-01', end_date=None)

    # Scheduled crawl of crypto news feeds.
    _run_monitored('get_aicoin_crypto_news', get_aicoin_crypto_news)
    _run_monitored('get_jinse_crypto_news', get_jinse_crypto_news)

    # for keywords in [['cryptocurrency'], ['bitcoin'], ['etheruem']]:  # exact-term match
    #     get_google_trends(keywords)
    # for topics in [[r'/m/0vpj4_b']]:  # topic match; /m/0vpj4_b is the 'cryptocurrency' topic
    #     get_google_trends(topics)