#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2022/12/6 15:12
# @Author  : CHENWang
# @Site    : 
# @File    : crypto_news.py
# @Software: PyCharm

"""
脚本说明:
"""

import os
import json
import time
import numpy as np
import pandas as pd
import requests
from quant_researcher.quant.project_tool.localize import DATA_DIR
from quant_researcher.quant.project_tool.time_tool import timestamp_to_datetime, timestamp_to_str, get_today, format_date_str
from quant_researcher.quant.project_tool.wrapper_tools.common_wrappers import deco_retry


@deco_retry(retry=10, retry_sleep=15)
def get_jinse_latest_bulletin():
    """Fetch the newest batch of Jinse (金色财经) live bulletins.

    Issues a single request for up to 1000 of the most recent live-news
    items (``flag=up`` from a fixed anchor id) and normalizes them.

    :return: DataFrame of bulletin records, deduplicated by ``id`` and
             sorted ascending, with ``datetime``/``time`` columns added and
             ``created_at_zh`` renamed to ``date``.
    """
    header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
              'cookie': 'userId=eyJpdiI6Ikt5a0JiaWUra0tiZllPVkxjYlhvM2c9PSIsInZhbHVlIjoiU21pZkI2dzV0QzhVMjdOcVg2Q3FFVnVhU3VLMTRVbFwvRlVZYXQwWWd5UEJmMkVWNTJ3UEdYZ1wvYUVTaHBleDhJcGJxa3k2WHNnR3RrYjlZM0pXM1c3UT09IiwibWFjIjoiZDQyNzc2M2RhY2EzZjI0MzI2YmJiYjM5ZDA5YTQwZWRhYmI2YTE5ODgyZDI0ZWI2ZmUxZDFmNjY2ZGNiOGZhOCJ9; is_refresh=eyJpdiI6Ikhra0huSnpxU2dJRWFiZVFCRDQzbEE9PSIsInZhbHVlIjoiejRNcWpCUVdQR2xaMUJ0N2VaOFNBZz09IiwibWFjIjoiY2JjZmZiZWMyMjkzYzNiNDVhNjQ3M2IxYTZmZjVhYzY3MDFiYmY4MTA3YjFhMmVhMWI5ZGVlZGY2MGE0MDU4NSJ9; Hm_lvt_3b668291b682e6dc69686a3e2445e11d=1669185149,1670309203,1670310605; Hm_lvt_56b68a6c42577e34b7c82cbd85f82c52=1669185149,1670309203,1670310605; Hm_lpvt_56b68a6c42577e34b7c82cbd85f82c52=1670311055; Hm_lpvt_3b668291b682e6dc69686a3e2445e11d=1670311055'}

    url = f'https://api.jinse.com/noah/v2/lives?limit={1000}&reading=false&source=web&flag=up&id={500000}&category=0'
    res = requests.get(url, headers=header, timeout=30)
    news_content = json.loads(res.text)['list']

    # The API groups items per calendar day under 'lives'; flatten them.
    # (Previous version also collected each day's 'date' into a list that
    # was never read — dead code removed, along with the unused `today`.)
    news_list = []
    for each_data in news_content:
        news_list.extend(each_data['lives'])

    all_news_df = pd.DataFrame(news_list)
    all_news_df.sort_values(by='id', ascending=True, inplace=True)
    all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_datetime)
    all_news_df['time'] = all_news_df['datetime'].dt.time
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_str)
    all_news_df.rename(columns={'created_at_zh': 'date'}, inplace=True)

    return all_news_df


@deco_retry(retry=50, retry_sleep=15)
def get_jinse_bulletin(update=False):
    """Crawl Jinse (金色财经) live bulletins, paging forward until today.

    :param update: False -> crawl from a fixed start id (full history);
                   True  -> resume from ids already stored in the local
                   ``all_jinse_bulletin_raw.csv`` and merge new rows in.
    :return: cleaned DataFrame (deduplicated by ``id``, sorted ascending),
             or None if the fetched payload is entirely zeros.
    """

    today = get_today(marker='with_n_dash')

    header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
              'cookie': 'userId=eyJpdiI6Ikt5a0JiaWUra0tiZllPVkxjYlhvM2c9PSIsInZhbHVlIjoiU21pZkI2dzV0QzhVMjdOcVg2Q3FFVnVhU3VLMTRVbFwvRlVZYXQwWWd5UEJmMkVWNTJ3UEdYZ1wvYUVTaHBleDhJcGJxa3k2WHNnR3RrYjlZM0pXM1c3UT09IiwibWFjIjoiZDQyNzc2M2RhY2EzZjI0MzI2YmJiYjM5ZDA5YTQwZWRhYmI2YTE5ODgyZDI0ZWI2ZmUxZDFmNjY2ZGNiOGZhOCJ9; is_refresh=eyJpdiI6Ikhra0huSnpxU2dJRWFiZVFCRDQzbEE9PSIsInZhbHVlIjoiejRNcWpCUVdQR2xaMUJ0N2VaOFNBZz09IiwibWFjIjoiY2JjZmZiZWMyMjkzYzNiNDVhNjQ3M2IxYTZmZjVhYzY3MDFiYmY4MTA3YjFhMmVhMWI5ZGVlZGY2MGE0MDU4NSJ9; Hm_lvt_3b668291b682e6dc69686a3e2445e11d=1669185149,1670309203,1670310605; Hm_lvt_56b68a6c42577e34b7c82cbd85f82c52=1669185149,1670309203,1670310605; Hm_lpvt_56b68a6c42577e34b7c82cbd85f82c52=1670311055; Hm_lpvt_3b668291b682e6dc69686a3e2445e11d=1670311055'}

    if not update:
        id_start = 5000
    else:
        file_path = os.path.join(DATA_DIR, 'social_data')
        file_name = os.path.join(file_path, f'all_jinse_bulletin_raw')
        history_news_df = pd.read_csv(f'{file_name}.csv')
        # Start 1000 ids past the stored maximum so the first page overlaps
        # the saved history; the overlap is removed by drop_duplicates below.
        id_start = max(history_news_df['id']) + 1000

    page_limit = 500
    all_news_list = []
    while True:
        # flag=down pages forward from `id_start`, `page_limit` items at a time.
        url = f'https://api.jinse.com/noah/v2/lives?limit={page_limit}&reading=false&source=web&flag=down&id={id_start}&category=0'
        res = requests.get(url, headers=header, timeout=15)
        news_content = json.loads(res.text)['list']

        date_list = []
        news_list = []
        for each_data in news_content:
            date_list.append(each_data['date'])
            news = each_data['lives']
            news_list.extend(news)
        max_date = max(date_list)
        all_news_list.extend(news_list)
        id_start = id_start + page_limit - 1
        time.sleep(5)  # throttle so we don't hammer the API

        if max_date == today:  # data is now current through today — stop paging
            break

    all_news_df = pd.DataFrame(all_news_list)
    all_news_df.sort_values(by='id', ascending=True, inplace=True)
    all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_datetime)
    all_news_df['time'] = all_news_df['datetime'].dt.time
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_str)
    all_news_df.rename(columns={'created_at_zh': 'date'}, inplace=True)

    if update:
        all_news_df = pd.concat([all_news_df, history_news_df], axis=0)
        all_news_df.sort_values(by='id', ascending=True, inplace=True)
        all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)

    # Sanity check: bail out if the fetched frame is empty or every value is
    # zero. (The previous `.all().values[0]` only inspected the first column,
    # which holds non-zero ids, so the check could never trigger.)
    if all_news_df.empty or (all_news_df == 0).all().all():
        print(f'获取的数据全为0， 请检查入参是否正常')
        return None

    return all_news_df


@deco_retry(retry=50, retry_sleep=15)
def get_jinse_headlines(update=False):
    """Crawl Jinse (金色财经) headline timeline, paging downward by id.

    :param update: False -> crawl everything from a fixed start id down to 1;
                   True  -> stop once we reach the max ``object_id`` already
                   stored in ``all_jinse_headlines_raw.csv`` and merge.
    :return: cleaned DataFrame (deduplicated by ``object_id``, sorted
             ascending), or None if the fetched payload is entirely zeros.
    """

    header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
              'cookie': 'userId=eyJpdiI6Ikt5a0JiaWUra0tiZllPVkxjYlhvM2c9PSIsInZhbHVlIjoiU21pZkI2dzV0QzhVMjdOcVg2Q3FFVnVhU3VLMTRVbFwvRlVZYXQwWWd5UEJmMkVWNTJ3UEdYZ1wvYUVTaHBleDhJcGJxa3k2WHNnR3RrYjlZM0pXM1c3UT09IiwibWFjIjoiZDQyNzc2M2RhY2EzZjI0MzI2YmJiYjM5ZDA5YTQwZWRhYmI2YTE5ODgyZDI0ZWI2ZmUxZDFmNjY2ZGNiOGZhOCJ9; is_refresh=eyJpdiI6Ikhra0huSnpxU2dJRWFiZVFCRDQzbEE9PSIsInZhbHVlIjoiejRNcWpCUVdQR2xaMUJ0N2VaOFNBZz09IiwibWFjIjoiY2JjZmZiZWMyMjkzYzNiNDVhNjQ3M2IxYTZmZjVhYzY3MDFiYmY4MTA3YjFhMmVhMWI5ZGVlZGY2MGE0MDU4NSJ9; Hm_lvt_3b668291b682e6dc69686a3e2445e11d=1669185149,1670309203,1670310605; Hm_lvt_56b68a6c42577e34b7c82cbd85f82c52=1669185149,1670309203,1670310605; Hm_lpvt_56b68a6c42577e34b7c82cbd85f82c52=1670311055; Hm_lpvt_3b668291b682e6dc69686a3e2445e11d=1670311055'}

    if not update:
        information_id_start = 500000
        stop_id = 1
    else:
        file_path = os.path.join(DATA_DIR, 'social_data')
        file_name = os.path.join(file_path, f'all_jinse_headlines_raw')
        history_news_df = pd.read_csv(f'{file_name}.csv')
        # Overshoot the stored maximum so the first page overlaps history;
        # the overlap is removed by drop_duplicates below.
        information_id_start = max(history_news_df['object_id']) + 10000
        stop_id = max(history_news_df['object_id'])

    @deco_retry(retry=100, retry_sleep=15)
    def data_crawer(information_id_start):
        # One page of the timeline, `page_limit` items, descending from the id.
        url = f'https://api.jinse.com/noah/v3/timelines?catelogue_key=www&limit={page_limit}&information_id={information_id_start}&flag=down'
        res = requests.get(url, headers=header, timeout=15)
        return res

    page_limit = 100
    all_news_list = []
    while True:
        res = data_crawer(information_id_start)
        news_content = json.loads(res.text)['data']

        news_list = []
        for each_news in news_content['list']:
            # Flatten the variant payload keys object_1..object_9 into the
            # top-level record so every row has a uniform schema.
            for i in range(1, 10):
                if f'object_{i}' in each_news.keys():
                    each_news.update(each_news[f'object_{i}'])
                    del each_news[f'object_{i}']
            news_list.extend([each_news])

        news_df = pd.DataFrame(news_list)
        news_df['datetime'] = news_df['published_at'].apply(timestamp_to_datetime)
        news_df['time'] = news_df['datetime'].dt.time
        news_df['datetime'] = news_df['published_at'].apply(timestamp_to_str)
        news_df.rename(columns={'published_at': 'timestamp'}, inplace=True)
        news_df.sort_values(by='object_id', ascending=True, inplace=True)

        all_news_list.append(news_df)
        # Next anchor: just past whichever cursor advanced further.
        information_id_start = max(news_content['bottom_id'], min(news_df['object_id'])) + 10
        time.sleep(5)  # throttle so we don't hammer the API

        print(min(news_df['object_id']))  # progress indicator
        if min(news_df['object_id']) <= stop_id:  # crossed the stop boundary — done
            break

    all_news_df = pd.concat(all_news_list, axis=0)
    all_news_df.sort_values(by='object_id', ascending=True, inplace=True)
    all_news_df.drop_duplicates(subset=['object_id'], keep='first', inplace=True, ignore_index=True)

    if update:
        all_news_df = pd.concat([all_news_df, history_news_df], axis=0)
        all_news_df.sort_values(by='object_id', ascending=True, inplace=True)
        all_news_df.drop_duplicates(subset=['object_id'], keep='first', inplace=True, ignore_index=True)

    # Sanity check: bail out if the frame is empty or entirely zeros. (The
    # previous `.all().values[0]` only checked the first column and could
    # never trigger on real id data.)
    if all_news_df.empty or (all_news_df == 0).all().all():
        print(f'获取的数据全为0， 请检查入参是否正常')
        return None

    return all_news_df


@deco_retry(retry=5, retry_sleep=15)
def get_8btc_bulletin(update=False):
    """Crawl 8btc (巴比特) flash bulletins via its GraphQL endpoint.

    NOTE(review): this function looks unfinished — the response parsing
    below iterates the decoded JSON as if it had jinse's ``'lives'`` shape,
    but the GraphQL response is a dict (``data.articleGraph.list.edges``);
    the pagination variables (``first``/``after``) are also never advanced.
    Parsing is left as-is pending the real response schema — verify before
    relying on this crawler.

    :param update: False -> crawl from a fixed start id;
                   True  -> resume from ids already stored locally.
    :return: cleaned DataFrame, or None if the payload is entirely zeros.
    """

    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
        'authorization': '{"secretKeyVersion":1,"sign":"gDt1nQ3Ay458FG_Xj-Aum04fftADIMj_3BUwAh4Y_TLCz1LRhilWuUod5ejc38HnTWeDNrlOJirimPSo2PO0DQ=="}',
        # 'authorization': '{"secretKeyVersion":1,"sign":"gDt1nQ3Ay458FG_Xj-Aum04fftADIMj_3BUwAh4Y_TIsanAgCt4b5lDPU4pCMkifTWeDNrlOJirimPSo2PO0DQ=="}',
        'gate_token': 'eyJ0eXBlIjoiMSIsImFsZyI6IkhTNTEyIn0.eyJkZXZpY2VfaWQiOiJMTEdlcTFhSlNvRkRkVnRWR2RMQyIsImV4cGlyZV90aW1lIjoxNjcwMzgyMTYyLCJwcm9kdWN0X2NvZGUiOiI4YnRjIiwic291cmNlIjoid2ViIiwidWlkIjoxMDkzMzY3LCJ1c2VyX3R5cGUiOiIxIn0.AUEhCEUD1-Q402qVBIlDEBItiUlsrwpKMTK932soS8MtTza_5sia1Z6iaH6zXK5gnKK-iMDj6_lhriGi6XPruQ'
    }

    if not update:
        id_start = 5000
    else:
        file_path = os.path.join(DATA_DIR, 'social_data')
        # Fixed copy-paste bug: this crawler previously resumed from the
        # *jinse* checkpoint file ('all_jinse_bulletin_raw').
        file_name = os.path.join(file_path, f'all_8btc_bulletin_raw')
        history_news_df = pd.read_csv(f'{file_name}.csv')
        id_start = max(history_news_df['id'])

    old_max_id = 0
    page_limit = 5000
    all_news_list = []
    while True:
        url = f'https://gate.8btc.cn:8443/one-graph-auth/graphql'
        body = {'listFlash': 'listFlash',
                'query': "query listFlash($first: Int, $after: String, $showOn7x24h: Boolean, $flashCategory: FlashCategory = GENERAL, $tag: Int, $startTime: Date, $endTime: Date) {\n  articleGraph {\n    list: listFlash(page: {first: $first, after: $after, pattern: CURSOR}, param: {flashCategory: $flashCategory, showOn7x24h: $showOn7x24h, tagId: $tag, startTime: $startTime, endTime: $endTime}) {\n      edges {\n        node {\n          id\n          post {\n            title\n            desc\n            content\n            postDate\n            thumbnail\n            __typename\n          }\n          extra {\n            authorInfo {\n              base {\n                displayName\n                __typename\n              }\n              __typename\n            }\n            source {\n              link\n              name\n              __typename\n            }\n            __typename\n          }\n          ... on Flash {\n            highlight\n            push\n            sense {\n              down\n              up\n              select\n              __typename\n            }\n            __typename\n          }\n          __typename\n        }\n        __typename\n      }\n      pageInfo {\n        hasNextPage\n        totalCount\n        startCursor\n        endCursor\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}\n",
                'variables': {
                    "flashCategory": "GENERAL",
                    "first": 20,
                    "after": "MTk=",  # TODO: cursor is hard-coded; paging never advances
                    "showOn7x24h": True,
                    "tag": None,
                    "startTime": None,
                    "endTime": None
                }}
        res = requests.post(url=url, headers=header, timeout=15, json=body)
        news_content = json.loads(res.text)

        # NOTE(review): presumably this should walk data.articleGraph.list.edges;
        # the 'lives' access mirrors the jinse crawlers and likely fails here.
        news_list = []
        for each_data in news_content:
            news = each_data['lives']
            news_list.extend(news)
        max_id = max([i['id'] for i in news_list])
        if max_id == old_max_id:
            # No new ids since the last page — nothing left to fetch.
            break
        else:
            old_max_id = max_id
            all_news_list.extend(news_list)
            id_start = id_start + page_limit - 1
            time.sleep(1)  # throttle so we don't hammer the API

    all_news_df = pd.DataFrame(all_news_list)
    all_news_df.sort_values(by='id', ascending=False, inplace=True)
    all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_datetime)
    all_news_df['time'] = all_news_df['datetime'].dt.time
    all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_str)
    all_news_df.rename(columns={'created_at_zh': 'date'}, inplace=True)

    if update:
        all_news_df = pd.concat([all_news_df, history_news_df], axis=0)
        all_news_df.sort_values(by='id', ascending=False, inplace=True)
        all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)

    # Sanity check: bail out if the frame is empty or entirely zeros. (The
    # previous `.all().values[0]` only checked the first column.)
    if all_news_df.empty or (all_news_df == 0).all().all():
        print(f'获取的数据全为0， 请检查入参是否正常')
        return None

    return all_news_df


@deco_retry(retry=5, retry_sleep=15)
def get_aicoin_bulletin(update=False):
    """Crawl AICoin flash bulletins, paging backwards from the newest id.

    :param update: False -> crawl the full history down to id 1;
                   True  -> fetch only ids newer than the local
                   ``all_aicoin_bulletin_raw.csv`` checkpoint and merge.
    :return: cleaned DataFrame (deduplicated by ``id``, sorted ascending),
             or None if the fetched payload is entirely zeros.
    """

    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
        'referer': 'https://www.aicoin.com/?long_live_aicoin=true',
        'accept-encoding': 'gzip, deflate, br',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'x-requested-with': 'XMLHttpRequest',
        'x-xsrf-token': 'eyJpdiI6IlcrRWk5dWFEajBPME8zRk5XRFBvYVE9PSIsInZhbHVlIjoiNlBiMXh2UmhaQzZVcVwvajVrYkw5Vk81bVwvWDNCMDcrbkJSbE1zRVYzU3ZHNmMxNEc5ZWU4OUJNN0lpVVNKMlErVWhuUUxkWnZCNnpPcXJ3WVNIRDhYdz09IiwibWFjIjoiOGQ4MDIwNzU1N2VjYmQ3MzEzYTY5MGU4Y2UzYzBiY2M4ZGUzNGZjNDg0Yzk3MjFlNGJhYTk2OTA0NjJmZDAyZCJ9',
        'cookie': 'Hm_lvt_3c606e4c5bc6e9ff490f59ae4106beb4=1670386124; _pk_id.2.2253=8767976bf2f08a57.1670386124.; _ga=GA1.2.1895126092.1670386124; _gid=GA1.2.1466404444.1670386124; __gads=ID=6076f0c35e74f145-2236f903c2d8007b:T=1670386124:RT=1670386124:S=ALNI_MaSeuKrIY_bky5Qay-k_gFw_0Fhtg; __gpi=UID=00000b8b8554d4d8:T=1670386124:RT=1670386124:S=ALNI_MYCDBMWdxYwRQIrPeGDt_6sLrjGeg; _pk_ref.2.e882=["","",1670391287,"https://www.aicoin.com/?lang=zh&long_lives_aicoin=\"live\""]; _pk_id.2.e882=89c026319d322b78.1670391287.; _pk_ses.2.e882=1; _gat_gtag_UA_108140256_2=1; _iidt=rIExq6ZV5rEt/VkBKm6SwK8weslHjkixKyC1wFtsvQxxE3JVjplOc3jIjbYf3yNaKURwODoZKHmasQ==; _vid_t=EC6xVHkkgSdA5nlqCpFXE455VU9Yf7S1clZzkntx9J8Z1wAXqYoPMZwV8jN3+Y5iFwrtI0v67lC6Pg==; XSRF-TOKEN=eyJpdiI6IlcrRWk5dWFEajBPME8zRk5XRFBvYVE9PSIsInZhbHVlIjoiNlBiMXh2UmhaQzZVcVwvajVrYkw5Vk81bVwvWDNCMDcrbkJSbE1zRVYzU3ZHNmMxNEc5ZWU4OUJNN0lpVVNKMlErVWhuUUxkWnZCNnpPcXJ3WVNIRDhYdz09IiwibWFjIjoiOGQ4MDIwNzU1N2VjYmQ3MzEzYTY5MGU4Y2UzYzBiY2M4ZGUzNGZjNDg0Yzk3MjFlNGJhYTk2OTA0NjJmZDAyZCJ9; aicoin_token=zKuTGf7MXI9T66KAduEpJC1xgL84kaw5BJLOwBE38FsF7uMOrFu9DymrJrCQlbtjdh5wovzeHBRzLSlgcCy3XQLUabpL+ZcupRRlKE/DDsL0c9eIblYtrd86tqMZQzM90NMn23F4SMHGyTQ27JLVyVoaOOCh+DZlW1toyWeFMfL7a1V7eobg6dMUlRrdSLx/iGt0SHlA6gVm8b96X8jBNuFp33gV9nj4OVt70gJ/i/Q=; __els__=1; _pk_ref.2.2253=["","",1670391311,"https://www.google.com/"]; _pk_ses.2.2253=1; Hm_lpvt_3c606e4c5bc6e9ff490f59ae4106beb4=1670391311; aicoin_session=eyJpdiI6IkZVSm5kemVkUFwvWGlYV2xLYjdEdnVnPT0iLCJ2YWx1ZSI6InNKK1wvRmRsWHViRGxnN3JQUkRwd1A5eGZDUklRalVYV25NMldxYStXUWIyZGcxMHorY3hibjhyWmt2aWxJa1wvMm44dFY1QURcL09aNWE2RkJCa2NsZXlRPT0iLCJtYWMiOiJiYjMwNjRkODFhNjU0NzA4N2IwNDYwNDkwYzM2NjRkYTdhNzE1NjRhODdlMzAyNTZiYWUzMWM5NTVkYTk3MzgyIn0='
    }

    if not update:
        last_id = 21457220
        min_id = 0
    else:
        # One unanchored request tells us the current newest id on the site.
        url = f'https://www.aicoin.com/api/chart/kline/common/flash?first_id='
        res = requests.get(url=url, headers=header, timeout=15)
        news_content = json.loads(res.text)['data']['list']
        news_df = pd.DataFrame(news_content)
        last_id = max(news_df['id'])

        file_path = os.path.join(DATA_DIR, 'social_data')
        file_name = os.path.join(file_path, f'all_aicoin_bulletin_raw')
        history_news_df = pd.read_csv(f'{file_name}.csv')
        min_id = max(history_news_df['id'])

    @deco_retry(retry=100, retry_sleep=15)
    def data_crawer(last_id):
        # One page of bulletins strictly older than `last_id`.
        url = f'https://www.aicoin.com/api/chart/kline/common/flash?last_id={last_id}'
        res = requests.get(url=url, headers=header, timeout=15)
        return res

    all_news_df_list = []
    while last_id >= min_id:
        print(f'开始获取id={last_id}之后的快讯')
        res = data_crawer(last_id)
        news_content = json.loads(res.text)['data']['list']
        if res.status_code == 200 and news_content:
            news_df = pd.DataFrame(news_content)
            all_news_df_list.append(news_df)
            if min(news_df['id']) == 1:
                # Reached the very first bulletin. (Previously this set
                # last_id = 0, which with min_id == 0 never exited the loop.)
                break
            last_id = min(news_df['id'])
        elif res.status_code != 200:
            # Abandon on a persistent HTTP error instead of looping forever
            # on the same id; partial results collected so far are kept.
            print(f'id={last_id}之后的快讯获取异常： {res.text}')
            break
        else:
            # 200 with an empty list: no older data remains.
            print(f'快讯已经获取到头')
            break

    all_news_df = pd.concat(all_news_df_list, axis=0)
    all_news_df.sort_values(by='id', ascending=True, inplace=True)
    all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)
    all_news_df.rename(columns={'time': 'timestamp'}, inplace=True)
    all_news_df['datetime'] = all_news_df['timestamp'].apply(timestamp_to_datetime)
    all_news_df['date'] = all_news_df['datetime'].dt.date
    all_news_df['time'] = all_news_df['datetime'].dt.time
    all_news_df['datetime'] = all_news_df['timestamp'].apply(timestamp_to_str, fmt='%Y-%m-%d %H:%M:%S')

    if update:
        all_news_df = pd.concat([all_news_df, history_news_df], axis=0)
        all_news_df.drop_duplicates(subset=['id'], keep='first', inplace=True, ignore_index=True)
        all_news_df.sort_values(by='id', ascending=True, inplace=True)

    # Sanity check: bail out if the frame is empty or entirely zeros. (The
    # previous `.all().values[0]` only checked the first column.)
    if all_news_df.empty or (all_news_df == 0).all().all():
        print(f'获取的数据全为0， 请检查入参是否正常')
        return None

    return all_news_df


if __name__ == '__main__':
    # Ad-hoc driver: only the Jinse headlines pipeline below is active;
    # the other crawlers / analyses are kept as commented-out recipes.

    # # Fetch AICoin bulletin data
    # all_news_df = get_aicoin_bulletin(update=True)
    # file_path = os.path.join(DATA_DIR, 'social_data')
    # file_name = os.path.join(file_path, f'all_aicoin_bulletin_raw')
    # all_news_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)
    #
    # all_news_df.sort_values(by='id', ascending=True, inplace=True)
    # news_df = all_news_df[['date', 'time', 'datetime', 'title', 'content']]
    # file_name = os.path.join(file_path, f'all_aicoin_bulletin')
    # news_df.columns = ['日期', '时间', '时间日期', '标题', '新闻内容']
    # news_df.to_excel(f'{file_name}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)

    # # News-volume analysis
    # all_news_df['datetime'] = all_news_df['timestamp'].apply(timestamp_to_datetime, tz_str='+0000')
    # news_volume_df = all_news_df.set_index('datetime')
    # daily_news_volume_df = pd.DataFrame(news_volume_df['id'].resample('D').count())
    # daily_news_volume_df['datetime'] = daily_news_volume_df.index
    # daily_news_volume_df.index = daily_news_volume_df['datetime'].dt.strftime('%Y-%m-%d')
    #
    # from quant_researcher.quant.datasource_fetch.crypto_api.glassnode import get_prices
    # prices_df = get_prices(ohlc=False, asset='BTC', start_date='2017-08-10', end_date='2022-12-06', interval='24h')
    # prices_df = prices_df[['close']]
    # prices_df['log_price'] = np.log10(prices_df['close'])
    #
    # daily_news_volume_price_df = pd.concat([daily_news_volume_df, prices_df], axis=1)
    # daily_news_volume_price_df['id_ma7'] = daily_news_volume_price_df['id'].rolling(7).mean()
    # daily_news_volume_price_df['id_ma30'] = daily_news_volume_price_df['id'].rolling(30).mean()
    # file_name = os.path.join(file_path, f'all_aicoin_daily_news_volume_price')
    # daily_news_volume_price_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)

    # # Fetch 8btc bulletin data
    # all_news_df = get_8btc_bulletin(update=False)
    #
    # # Fetch Jinse (金色财经) bulletin data
    # all_news_df = get_jinse_bulletin(update=True)
    # file_path = os.path.join(DATA_DIR, 'social_data')
    # file_name = os.path.join(file_path, f'all_jinse_bulletin_raw')
    # all_news_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)
    #
    # news_df = all_news_df[['date', 'time', 'datetime', 'content_prefix', 'content', 'up_counts', 'down_counts']]
    # news_df['year'] = news_df['date'].str[:4]
    # for group in news_df.groupby(by='year'):
    #     file_name = os.path.join(file_path, f'all_jinse_bulletin_{group[0]}')
    #     group[1].columns = ['日期', '时间', '时间日期', '标题', '新闻内容', '看涨', '看空', '年份']
    #     group[1].to_excel(f'{file_name}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)

    # # News-volume analysis
    # all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_datetime, tz_str='+0000')
    # news_volume_df = all_news_df.set_index('datetime')
    # daily_news_volume_df = pd.DataFrame(news_volume_df['id'].resample('D').count())
    # daily_news_volume_df['datetime'] = daily_news_volume_df.index
    # daily_news_volume_df.index = daily_news_volume_df['datetime'].dt.strftime('%Y-%m-%d')
    #
    # from quant_researcher.quant.datasource_fetch.crypto_api.glassnode import get_prices
    # prices_df = get_prices(ohlc=False, asset='BTC', start_date='2017-08-10', end_date='2022-12-06', interval='24h')
    # prices_df = prices_df[['close']]
    # prices_df['log_price'] = np.log10(prices_df['close'])
    #
    # daily_news_volume_price_df = pd.concat([daily_news_volume_df, prices_df], axis=1)
    # daily_news_volume_price_df['id_ma7'] = daily_news_volume_price_df['id'].rolling(7).mean()
    # daily_news_volume_price_df['id_ma30'] = daily_news_volume_price_df['id'].rolling(30).mean()
    # file_name = os.path.join(file_path, f'all_jinse_daily_news_volume_price')
    # daily_news_volume_price_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)

    # Fetch Jinse (金色财经) headline data, snapshot the raw rows to CSV,
    # then export a curated Chinese-labeled view to Excel.
    all_news_df = get_jinse_headlines(update=True)
    file_path = os.path.join(DATA_DIR, 'social_data')
    file_name = os.path.join(file_path, f'all_jinse_headlines_raw')
    all_news_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)

    # NOTE(review): crashes with AttributeError here if get_jinse_headlines
    # returned None (its all-zero guard) — confirm that case cannot occur.
    news_df = all_news_df[['datetime', 'time', 'title', 'summary', 'content', 'show_read_number', 'up_counts', 'down_counts', 'jump_url']]
    file_name = os.path.join(file_path, f'all_jinse_headlines')
    news_df.columns = ['日期', '时间', '标题', '文章摘要', '快讯内容', '文章阅读数', '快讯看多数', '快讯看空数', '跳转链接']
    news_df.to_excel(f'{file_name}.xlsx', engine='xlsxwriter', encoding='utf_8_sig', index=False)

    # # News-volume analysis
    # all_news_df['datetime'] = all_news_df['created_at'].apply(timestamp_to_datetime, tz_str='+0000')
    # news_volume_df = all_news_df.set_index('datetime')
    # daily_news_volume_df = pd.DataFrame(news_volume_df['id'].resample('D').count())
    # daily_news_volume_df['datetime'] = daily_news_volume_df.index
    # daily_news_volume_df.index = daily_news_volume_df['datetime'].dt.strftime('%Y-%m-%d')
    #
    # from quant_researcher.quant.datasource_fetch.crypto_api.glassnode import get_prices
    # prices_df = get_prices(ohlc=False, asset='BTC', start_date='2017-08-10', end_date='2022-12-06', interval='24h')
    # prices_df = prices_df[['close']]
    # prices_df['log_price'] = np.log10(prices_df['close'])
    #
    # daily_news_volume_price_df = pd.concat([daily_news_volume_df, prices_df], axis=1)
    # daily_news_volume_price_df['id_ma7'] = daily_news_volume_price_df['id'].rolling(7).mean()
    # daily_news_volume_price_df['id_ma30'] = daily_news_volume_price_df['id'].rolling(30).mean()
    # file_name = os.path.join(file_path, f'all_jinse_daily_news_volume_price')
    # daily_news_volume_price_df.to_csv(f'{file_name}.csv', encoding='utf_8_sig', index=False)