#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2022/12/6 15:12
# @Author  : CHENWang
# @Site    :
# @File    : alternative_data_analysis.py
# @Software: PyCharm

"""
脚本说明:
"""

import os
import json
import time
import numpy as np
import pandas as pd
import requests
from quant_researcher.quant.project_tool.localize import DATA_DIR
from quant_researcher.quant.project_tool.time_tool import timestamp_to_datetime, timestamp_to_str, get_today, format_date_str
from quant_researcher.quant.project_tool.wrapper_tools.common_wrappers import deco_retry
from quant_researcher.quant.datasource_fetch.crypto_api.crypto_news import get_aicoin_bulletin, get_jinse_bulletin, get_jinse_headlines, get_8btc_bulletin

# Rolling windows for the moving averages, and for the standardized-score
# ratio (SSR) computed on the 30-day moving average.
MA_WINDOWS = (7, 30, 60, 90, 120, 150, 200)
SSR_WINDOWS = (60, 90, 120, 150, 200)


def add_rolling_features(df, column):
    """Add rolling moving-average and SSR columns to ``df`` in place.

    For ``column``, writes ``ma{w}`` (trailing rolling mean) for each window
    in ``MA_WINDOWS``, then ``ssr{w}`` for each window in ``SSR_WINDOWS``,
    where ``ssr{w} = (ma30 - mean_w(ma30)) / std_w(ma30)`` — i.e. the 30-day
    MA standardized against its own trailing ``w``-day distribution.

    :param df: DataFrame holding the raw daily series; mutated in place
        (existing ``ma*``/``ssr*`` columns are overwritten).
    :param column: name of the raw daily series to smooth.
    :return: None (mutates ``df``).
    """
    for window in MA_WINDOWS:
        df[f'ma{window}'] = df[column].rolling(window).mean()
    base = df['ma30']
    for window in SSR_WINDOWS:
        rolling = base.rolling(window)
        df[f'ssr{window}'] = (base - rolling.mean()) / rolling.std()


if __name__ == '__main__':
    # NOTE(review): the large commented-out AICoin / Jinse-bulletin analyses
    # that used to live here duplicated the rolling-feature logic below; they
    # were removed — recover them from version control if needed again.

    # --- BTC daily OHLCV: keep the close price plus its log10 ---
    ohlcv_path = os.path.join(DATA_DIR, 'BTC_history_ohlcvm.xlsx')
    ohlcv_data = pd.read_excel(ohlcv_path, index_col='end_date')
    # .copy() avoids chained assignment on a view (SettingWithCopyWarning):
    # the original wrote 'log_price' into a slice of ohlcv_data.
    prices_df = ohlcv_data[['close']].copy()
    prices_df['log_price'] = np.log10(prices_df['close'])

    # --- Jinse headlines: daily volume / vote / read-count features vs price ---
    social_dir = os.path.join(DATA_DIR, 'social_data')
    headlines_df = pd.read_csv(os.path.join(social_dir, 'all_jinse_headlines_raw.csv'),
                               encoding='utf_8_sig')
    # Convert raw timestamps to UTC datetimes; 'date' is the UTC day key.
    headlines_df['datetime'] = headlines_df['timestamp'].apply(timestamp_to_datetime, tz_str='+0000')
    headlines_df['time'] = headlines_df['datetime'].dt.time
    headlines_df['date'] = headlines_df['datetime'].dt.strftime('%Y-%m-%d')

    headlines_df = headlines_df[['id', 'datetime', 'date', 'time', 'title', 'summary',
                                 'show_read_number', 'up_counts', 'down_counts']]
    news_volume_df = headlines_df.set_index('datetime')
    daily_news_count = news_volume_df.groupby(['date'])['id'].count()
    daily_news_sums = news_volume_df.groupby(['date'])[['up_counts', 'down_counts', 'show_read_number']].sum()

    # Align daily news features with the price series (outer join keeps days
    # present on either side), then trim to the analysis window.
    daily_news_volume_price_df = pd.concat(
        [daily_news_count, daily_news_sums, prices_df.loc[daily_news_count.index[0]:, :]],
        axis=1, join="outer")
    daily_news_volume_price_df.sort_index(inplace=True)
    daily_news_volume_price_df = daily_news_volume_price_df.loc['2019-12-31':, :]

    analysis_dir = os.path.join(DATA_DIR, 'social_data_analysis')
    for analysis_type in ['id', 'up_counts', 'down_counts', 'show_read_number']:
        # ma*/ssr* columns are overwritten per analysis_type, so each CSV
        # carries the rolling features of that one series (plus the shared
        # raw columns and price).
        add_rolling_features(daily_news_volume_price_df, analysis_type)
        out_name = os.path.join(analysis_dir,
                                f'all_jinse_headlines_daily_news_{analysis_type}_price.csv')
        daily_news_volume_price_df.to_csv(out_name, encoding='utf_8_sig', index=True)