#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2021/11/17 13:14
# @Author  : CHEN Wang
# @Site    :
# @File    : sanbase.py
# @Software: PyCharm

"""
脚本说明: 从sanbase上获取相关指标信息
"""

import san
import time
import numpy as np
import pandas as pd
import requests
import json
import os
import sys
import random
import datetime
from quant_researcher.quant.project_tool.wrapper_tools.common_wrappers import deco_retry
from quant_researcher.quant.datasource_fetch.basic_model.requests_model import MyRequest
from quant_researcher.quant.project_tool.localize import DATA_DIR
from quant_researcher.quant.project_tool.time_tool import get_today

# NOTE(review): mutates the process-wide working directory on import and
# prepends it to sys.path so sibling packages resolve when run as a script;
# confirm no caller depends on the original CWD.
os.chdir('../..')
sys.path.insert(0, os.getcwd())

# SECURITY: hard-coded API credential checked into source — consider loading
# it from an environment variable or a secrets store instead.
API_KEY = '7xxan7qwp35wfcms_j456cek7ldh55dfs'  # jeffzhangzhenda@hotmail.com
san.ApiConfig.api_key = API_KEY  # configure the sanbase client's API key


def get_asset_list():
    """
    Fetch the full list of assets supported by sanbase.

    :return: DataFrame with one row per project, e.g.::

            marketSegment      name      slug    ticker    totalSupply
       0   Cryptocurrency   Bitcoin   bitcoin     BTC       18875537
       1   Smart Contracts  Ethereum  ethereum    ETH       118374300.749
    """
    return san.get("projects/all")


def get_metrics_list(asset=None, specific_metric=None):
    """
    List all available metrics, optionally restricted to one asset and/or
    filtered by a keyword.

    :param str asset: full asset slug such as 'bitcoin' or 'ethereum';
                      None lists metrics for all assets
    :param str specific_metric: keyword filter, e.g. 'trend' keeps only
                                metrics whose name contains 'trend'
    :return: sorted list of metric names
    """
    if asset is None:
        raw = san.available_metrics()
    else:
        raw = san.available_metrics_for_slug(slug=asset)

    # de-duplicate, then sort for a stable ordering
    metrics = sorted(set(raw))

    if specific_metric is not None:
        # keep only metrics containing the requested keyword
        return [m for m in metrics if specific_metric in m]

    return metrics


def get_social_metrics_list(asset=None):
    """
    List the social/sentiment-related metrics available for an asset.

    :param str asset: full asset slug such as 'bitcoin' or 'ethereum';
                      None lists metrics for all assets
    :return: sorted list of social metric names
    """
    keywords = ('twitter', 'social', 'community', 'github', 'sentiment')
    social_metrics = [m for m in get_metrics_list(asset=asset)
                      if any(kw in m for kw in keywords)]

    if asset == 'bitcoin':
        # these two metrics never return data for bitcoin
        for unusable in ('social_active_users', 'nft_social_volume'):
            if unusable in social_metrics:
                social_metrics.remove(unusable)

    social_metrics.sort()
    return social_metrics


def get_top_social_gainer_loser(start_date="2019-01-01", end_date=None, size=5, time_window='2d'):
    """
    Top social gainers/losers returns the social volume changes for crypto projects.

    :param str start_date: first day of the window (iso date string)
    :param str end_date: last day; defaults to today
    :param int size: number of projects returned per side
    :param str time_window: comparison window, e.g. '2d'
    :return: DataFrame from sanbase, or None on an unrecognized API error
    """
    if end_date is None:
        end_date = get_today(marker='with_n_dash')
    try:
        df = san.get(
            "top_social_gainers_losers",
            from_date=start_date,
            to_date=end_date,
            size=size,
            time_window=time_window,
            status="ALL"
        )
    except Exception as e:
        if 'is too complex' in str(e):
            # Query exceeded the API complexity limit: split the date range
            # into enough chunks that each stays under ~20000 complexity.
            print(f'数据太长无法一次性获取， 需要分段获取')
            # NOTE(review): split(...)[2] assumes 'complexity is ' occurs twice
            # in the error text; with a single occurrence this raises
            # IndexError — confirm against a real API error message.
            complexity = int((str(e).split('complexity is ')[2]).split(' and maximum is ')[0])
            num = int(np.ceil(complexity / 20000))
            time_range = list(pd.date_range(start_date, end_date).strftime("%Y-%m-%d"))
            time_gap = int(np.ceil(len(time_range) / num))
            time_range_list = []
            for i in range(num):
                if i != num - 1:
                    time_range_list.append([time_range[i * time_gap], time_range[(i + 1) * time_gap - 1]])
                else:
                    time_range_list.append([time_range[i * time_gap], time_range[-1]])
            res = []
            for time_range in time_range_list:
                df = san.get(
                    "top_social_gainers_losers",
                    from_date=time_range[0],
                    to_date=time_range[1],
                    size=size,
                    time_window=time_window,
                    status="ALL"
                )
                res.append(df)
            # NOTE(review): the chunks cover consecutive date ranges, so
            # row-wise (axis=0) concatenation looks intended; axis=1 pairs
            # them as columns — verify against the returned frame's shape.
            df = pd.concat(res, axis=1)
        else:
            # Any other API error is silently swallowed; caller must handle None.
            return None

    return df


def get_indicators(indic_name=None, asset='bitcoin', start_date=None, end_date=None, interval='1d', test_times=5,
                   **kwargs):
    """
    Fetch one metric's timeseries from the sanbase API via the `san` client.

    The data is fetched by providing a string in the format query/slug plus
    additional parameters.

    :param str indic_name: metric name, e.g. 'asopr'
    :param str asset: full asset slug, e.g. 'bitcoin', 'ethereum'
    :param str start_date: iso8601 date (e.g. '2018-06-01') or a relative datetime string
    :param str end_date: end date, same format as start_date
    :param str interval: data frequency — an integer followed by one of s, m, h, d or w;
                         coarser intervals aggregate with the metric's default (usually average)
    :param int test_times: maximum number of reconnect attempts on rate limiting
    :param kwargs: optional API parameters, currently unused. The API also supports:
        transform — apply a transformation on the data ("moving_average",
        "consecutive_differences", "percent_change"); aggregation — the
        aggregation used for the query results.
    :return: DataFrame indexed by 'YYYY-MM-DD' strings with one column named
             after the metric (left as-is when multi-column), or None on failure
    """
    print(f'开始获取指标{indic_name}数据')
    indic_name1 = indic_name + '/' + asset

    df = None
    ii = 1
    while ii <= test_times:  # retry at most test_times times
        try:
            df = san.get(indic_name1, from_date=start_date, to_date=end_date, interval=interval)
        except Exception as e:
            if san.is_rate_limit_exception(e):
                # Rate limited: wait the suggested time, then retry.
                # (Previously this branch fell through to the generic-error
                # `else: return None` below, so the retry never happened.)
                rate_limit_seconds = san.rate_limit_time_left(e)
                print(f"Will sleep for {rate_limit_seconds}")
                time.sleep(rate_limit_seconds)
                ii += 1  # try again on the next loop iteration
            elif 'is too complex' in str(e):
                # Query exceeded the API complexity limit: split the date
                # range into chunks small enough to fetch individually.
                print(f'数据太长无法一次性获取， 需要分段获取')
                complexity = int((str(e).split('complexity is ')[2]).split(' and maximum is ')[0])
                num = int(np.ceil(complexity / 20000))
                time_range = list(pd.date_range(start_date, end_date).strftime("%Y-%m-%d"))
                time_gap = int(np.ceil(len(time_range) / num))
                time_range_list = []
                for i in range(num):
                    if i != num - 1:
                        time_range_list.append([time_range[i * time_gap], time_range[(i + 1) * time_gap - 1]])
                    else:
                        time_range_list.append([time_range[i * time_gap], time_range[-1]])
                res = []
                for time_range in time_range_list:
                    df = san.get(indic_name1, from_date=time_range[0], to_date=time_range[1], interval=interval)
                    res.append(df)
                # NOTE(review): consecutive date chunks are concatenated on
                # axis=1 (columns); axis=0 looks intended — confirm.
                df = pd.concat(res, axis=1)
                break  # chunked fetch succeeded, stop retrying
            else:
                print(f'指标{indic_name}数据获取失败')
                return None
        else:  # fetch succeeded, stop retrying
            break

    if df is None:
        # All retries were consumed by rate limiting; previously this fell
        # through to `df.index` and raised NameError.
        print(f'指标{indic_name}数据获取失败')
        return None

    df.index = df.index.strftime("%Y-%m-%d")
    if len(df.columns) == 1:
        df.columns = [f'{indic_name}']
    else:
        print(f'指标{indic_name}数据为多列，无法对指标列重命名')

    print(f'指标{indic_name}数据获取成功')

    return df


def get_indicators_via_http(indic_name=None, asset='bitcoin', start_date=None, end_date=None, interval='1d',
                            test_times=5, **kwargs):
    """
    Fetch one metric's timeseries straight from the santiment GraphQL HTTP
    endpoint (bypassing the `san` client), authenticated via a session cookie.

    :param str indic_name: metric name, e.g. 'asopr'
    :param str asset: full asset slug, e.g. 'bitcoin', 'ethereum'
    :param str start_date: iso date string, e.g. '2018-06-01'. Required in
                           practice — a None would be interpolated literally
                           as the string "None" into the GraphQL query.
    :param str end_date: end date, same format as start_date (also required)
    :param str interval: data frequency — an integer followed by one of s, m, h, d or w;
                         coarser intervals aggregate with the metric's default (usually average)
    :param int test_times: maximum number of request attempts
    :param kwargs: optional API parameters, currently unused. The endpoint also supports:
        transform — apply a transformation on the data:
            "moving_average" — replace every value Vi with the average of the last
            "moving_average_base" values,
            e.g. transform={"type": "moving_average", "moving_average_base": 100}
            "consecutive_differences" — replace every value Vi with Vi - Vi-1 where
            i is the position in the list.
            "percent_change" — replace every value Vi with the percent change of
            Vi-1 and Vi ( (Vi / Vi-1 - 1) * 100 ).
        aggregation — the aggregation used for the query results,
            e.g. aggregation="LAST"

        (
          interval="1d",
          transform={"type": "moving_average", "moving_average_base": 100},
          aggregation="LAST"
        )
    :return: DataFrame indexed by 'end_date' strings with one column named
             after the metric (left as-is when multi-column), or None if the
             response could not be converted to a DataFrame
    :raises RuntimeError: when all test_times attempts fail
    """

    print(f'开始获取指标{indic_name}数据')

    header = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}
    # cookies_url = "https://api.santiment.net"
    # session = requests.Session()
    # req = session.get(cookies_url, headers=header)
    # Cookie = dict(session.cookies)

    # _sanbase_sid needs refreshing roughly once a month: log in to sanbase,
    # open https://api.santiment.net/graphiql, then grab the cookie via F12.
    # SECURITY: a live session token is hard-coded in source — rotate it and
    # load it from configuration instead.
    Cookie = {
        # '_sanbase_sid': 'SFMyNTY.g3QAAAADbQAAAAxhY2Nlc3NfdG9rZW5tAAABwmV5SmhiR2NpT2lKSVV6VXhNaUlzSW5SNWNDSTZJa3BYVkNKOS5leUpoZFdRaU9'
        #                 'pSnpZVzUwYVcxbGJuUWlMQ0pqYkdsbGJuUWlPaUpEYUhKdmJXVWdPVGd1TUM0ME56VTRMamd5SWl3aVpYaHdJam94TmpRME9UazNNVFV3TE'
        #                 'NKcFlYUWlPakUyTkRRNU9UWTROVEFzSW1semN5STZJbk5oYm5ScGJXVnVkQ0lzSW1wMGFTSTZJak5sTTJRNE5HTmtMV1kzWkdZdE5EYzFOe'
        #                 'TA0TldOakxUSmhZelJrTW1OaU9HTTNZeUlzSW01aVppSTZNVFkwTkRrNU5qZzBPU3dpY0d4aGRHWnZjbTBpT2lKWGFXNWtiM2R6SURFd0lp'
        #                 'd2ljM1ZpSWpvaU1UYzFNRGtpTENKMGVYQWlPaUpoWTJObGMzTWlMQ0owZVhCbElqb2lkWE5sY2w5aFkyTmxjM05mZEc5clpXNGlmUS5KLUN'
        #                 'vZnN2VzVHVFpFRVpjLUlfYURqcE4xRXBnNHBMN2laUTcxWUNkZVNuX2daODZENWtpS3NpeEFQYW1MSHY0eWdQa2NPQ2NqUkpYWnpwQkNfZm'
        #                 'ZyQW0AAAAKYXV0aF90b2tlbm0AAAHCZXlKaGJHY2lPaUpJVXpVeE1pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmhkV1FpT2lKellXNTBhVzFsY'
        #                 'm5RaUxDSmpiR2xsYm5RaU9pSkRhSEp2YldVZ09UZ3VNQzQwTnpVNExqZ3lJaXdpWlhod0lqb3hOalEwT1RrM01UVXdMQ0pwWVhRaU9qRTJOR'
        #                 'FE1T1RZNE5UQXNJbWx6Y3lJNkluTmhiblJwYldWdWRDSXNJbXAwYVNJNklqTmxNMlE0TkdOa0xXWTNaR1l0TkRjMU55MDROV05qTFRKaFl6U'
        #                 'mtNbU5pT0dNM1l5SXNJbTVpWmlJNk1UWTBORGs1TmpnME9Td2ljR3hoZEdadmNtMGlPaUpYYVc1a2IzZHpJREV3SWl3aWMzVmlJam9pTVRj'
        #                 'MU1Ea2lMQ0owZVhBaU9pSmhZMk5sYzNNaUxDSjBlWEJsSWpvaWRYTmxjbDloWTJObGMzTmZkRzlyWlc0aWZRLkotQ29mc3ZXNUdUWkVFWmM'
        #                 'tSV9hRGpwTjFFcGc0cEw3aVpRNzFZQ2RlU25fZ1o4NkQ1a2lLc2l4QVBhbUxIdjR5Z1BrY09DY2pSSlhaenBCQ19mZnJBbQAAAA1yZWZyZX'
        #                 'NoX3Rva2VubQAAAcRleUpoYkdjaU9pSklVelV4TWlJc0luUjVjQ0k2SWtwWFZDSjkuZXlKaGRXUWlPaUp6WVc1MGFXMWxiblFpTENKamJHb'
        #                 'GxiblFpT2lKRGFISnZiV1VnT1RndU1DNDBOelU0TGpneUlpd2laWGh3SWpveE5qUTNOREUyTURVd0xDSnBZWFFpT2pFMk5EUTVPVFk0TlRB'
        #                 'c0ltbHpjeUk2SW5OaGJuUnBiV1Z1ZENJc0ltcDBhU0k2SW1SaVpXTXlOV014TFdVMU1qY3RORGhpWmkxaE5tTTFMVEF5T0dJM05EWmtaalk'
        #                 'zTWlJc0ltNWlaaUk2TVRZME5EazVOamcwT1N3aWNHeGhkR1p2Y20waU9pSlhhVzVrYjNkeklERXdJaXdpYzNWaUlqb2lNVGMxTURraUxDSj'
        #                 'BlWEFpT2lKeVpXWnlaWE5vSWl3aWRIbHdaU0k2SW5WelpYSmZjbVZtY21WemFGOTBiMnRsYmlKOS5DWWIxNEtXei1tMjZfb0FaRDZGSl9sb'
        #                 'ktWdTU0RWVHekRiUlZwc0dwMllIaHZWUEdPTHhJYzhxbU00M0NTZDBtX3Z2aXp4TDgzbEg2THRRU3ZoYWJfdw.nyVWsg6_mqReQc57Zv9Js'
        #                 'xBTeXo5duIuxgA7lLcI9mw',
        '_sanbase_sid': 'SFMyNTY.g3QAAAADbQAAAAxhY2Nlc3NfdG9rZW5tAAABvmV5SmhiR2NpT2lKSVV6VXhNaUlzSW5SNWNDSTZJa3BYVkNKOS5leUpoZFdRaU9pSn'
                        'pZVzUwYVcxbGJuUWlMQ0pqYkdsbGJuUWlPaUpEYUhKdmJXVWdNVEF4TGpBdU1DNHdJaXdpWlhod0lqb3hOalUzTURFeU5UazBMQ0pwWVhRaU9q'
                        'RTJOVGN3TVRJeU9UUXNJbWx6Y3lJNkluTmhiblJwYldWdWRDSXNJbXAwYVNJNklqYzNabU5sWWpoaExURXdObVV0TkRrMFlpMDVaamcyTFRCal'
                        'l6Wm1ORE0yWVRjNFlpSXNJbTVpWmlJNk1UWTFOekF4TWpJNU15d2ljR3hoZEdadmNtMGlPaUpYYVc1a2IzZHpJREV3SWl3aWMzVmlJam9pTVRj'
                        'MU1Ea2lMQ0owZVhBaU9pSmhZMk5sYzNNaUxDSjBlWEJsSWpvaWRYTmxjbDloWTJObGMzTmZkRzlyWlc0aWZRLlg1UnB0OHZFZTFQRmJ1Tl93eE'
                        'J0V1hzOFFUODMxdXlDOUpKWlJxT003cFl2ZG5SeEg0bV9vbzdDODJEU3dJNjZQYVktTW1mcGxfdDhid0hDdlJuZ09nbQAAAAphdXRoX3Rva2Vu'
                        'bQAAAb5leUpoYkdjaU9pSklVelV4TWlJc0luUjVjQ0k2SWtwWFZDSjkuZXlKaGRXUWlPaUp6WVc1MGFXMWxiblFpTENKamJHbGxiblFpT2lKRG'
                        'FISnZiV1VnTVRBeExqQXVNQzR3SWl3aVpYaHdJam94TmpVM01ERXlOVGswTENKcFlYUWlPakUyTlRjd01USXlPVFFzSW1semN5STZJbk5oYm5S'
                        'cGJXVnVkQ0lzSW1wMGFTSTZJamMzWm1ObFlqaGhMVEV3Tm1VdE5EazBZaTA1WmpnMkxUQmpZelptTkRNMllUYzRZaUlzSW01aVppSTZNVFkxTn'
                        'pBeE1qSTVNeXdpY0d4aGRHWnZjbTBpT2lKWGFXNWtiM2R6SURFd0lpd2ljM1ZpSWpvaU1UYzFNRGtpTENKMGVYQWlPaUpoWTJObGMzTWlMQ0ow'
                        'ZVhCbElqb2lkWE5sY2w5aFkyTmxjM05mZEc5clpXNGlmUS5YNVJwdDh2RWUxUEZidU5fd3hCdFdYczhRVDgzMXV5QzlKSlpScU9NN3BZdmRuUn'
                        'hING1fb283QzgyRFN3STY2UGFZLU1tZnBsX3Q4YndIQ3ZSbmdPZ20AAAANcmVmcmVzaF90b2tlbm0AAAHAZXlKaGJHY2lPaUpJVXpVeE1pSXNJ'
                        'blI1Y0NJNklrcFhWQ0o5LmV5SmhkV1FpT2lKellXNTBhVzFsYm5RaUxDSmpiR2xsYm5RaU9pSkRhSEp2YldVZ01UQXhMakF1TUM0d0lpd2laWG'
                        'h3SWpveE5qVTVORE14TkRrMExDSnBZWFFpT2pFMk5UY3dNVEl5T1RRc0ltbHpjeUk2SW5OaGJuUnBiV1Z1ZENJc0ltcDBhU0k2SWpKbVpqVTFN'
                        'VGN4TFdabU1qUXRORGs1T0MwNU9EY3pMVE5sTkdFeFpHRXpNVEF5WWlJc0ltNWlaaUk2TVRZMU56QXhNakk1TXl3aWNHeGhkR1p2Y20waU9pSl'
                        'hhVzVrYjNkeklERXdJaXdpYzNWaUlqb2lNVGMxTURraUxDSjBlWEFpT2lKeVpXWnlaWE5vSWl3aWRIbHdaU0k2SW5WelpYSmZjbVZtY21WemFG'
                        'OTBiMnRsYmlKOS44T1lJLWc3T1M2MWxxakdaV2pPUUdhcTFHLXVZMG9vV3Y4djEwa3R0Q2Z3bVhyVjNRRjB6bk00SXhlbkJ5YzdxMTlieEtCLX'
                        'BadUNTT0tOb3dnd2VFUQ.Uolsu2HubKiC871vzjq8D_UBiq8eOZfQN7YIjfCWmz0'}

    # Build the GraphQL query by naive string concatenation; dates are wrapped
    # into full-day timestamps (T00:00:01Z .. T23:59:59Z).
    post_json = {
        'query': """{getMetric(metric: """ + f'"{indic_name}"' + """){
        timeseriesData(
          slug: """ + f'"{asset}"' + """
          from: """ + f'"{start_date}' + """T00:00:01Z"
          to: """ + f'"{end_date}' + """T23:59:59Z"
          interval: """ + f'"{interval}"' + """){
            datetime
            value
          }
      }
    }"""}

    url = "https://api.santiment.net/graphiql?"
    ii = 1
    while ii <= test_times:  # retry at most test_times times
        # NOTE(review): data=dict sends the query form-encoded
        # (application/x-www-form-urlencoded); json=post_json would send JSON —
        # confirm the endpoint accepts the form encoding.
        res = requests.post(url, headers=header, data=post_json, timeout=30, cookies=Cookie)
        if res.status_code == 200:
            if 'errors' in res.text:
                # HTTP 200 but a GraphQL-level error: back off and retry.
                print(f'指标{indic_name}数据获取失败，{res.status_code}; {res.text}')
                time.sleep(5)
                ii += 1
            else:
                break  # request succeeded, stop retrying
        else:
            print(f'指标{indic_name}数据获取失败，{res.status_code}; {res.text}')
            time.sleep(5)
            ii += 1

    # NOTE(review): if test_times < 1 the loop never runs and `res` is unbound
    # here — guaranteed NameError; acceptable only with the default of 5.
    if (res.status_code != 200) or ('errors' in res.text):
        raise RuntimeError(f'已经重连了{test_times}次，还是无法成功链接，请检查查询语句、网络链接或账户权限')

    # convert the JSON response to a pandas DataFrame
    try:
        data = json.loads(res.text, )
        data = data['data']['getMetric']['timeseriesData']
        columns = list(set(data[0].keys()))
        df = pd.DataFrame.from_records(data, columns=columns)
        if 's' in interval or 'm' in interval or 'h' in interval:  # sub-daily frequency: keep the time part
            df['datetime'] = df['datetime'].apply(lambda x: x[:10] + ' ' + x[11:19])
        else:
            # daily or coarser: keep only the date part
            df['datetime'] = df['datetime'].apply(lambda x: x[:10])
    except Exception:  # any other unexpected error while reshaping
        print(f'指标{indic_name}数据获取正常，但转换成Dataframe时失败')
        return None

    df.rename(columns={'datetime': 'end_date'}, inplace=True)
    df.drop_duplicates(subset=['end_date'], inplace=True)  # drop duplicate records for the same day
    df.set_index('end_date', inplace=True)
    if len(df.columns) == 1:
        df.columns = [f'{indic_name}']
    else:
        print(f'指标{indic_name}数据为多列，无法对指标列重命名')
    print(f'指标{indic_name}数据获取成功')

    return df


def get_indicator_info(indic_name='dev_activity'):
    """
    Fetch the metadata for an on-chain metric.

    Fields returned:
      availableSlugs     - all slugs available for this metric
      defaultAggregation - aggregation applied when a coarser interval is queried
      humanReadableName  - display name suitable for showing to users
      isAccessible       - whether the configured plan may access the metric at
                           all (e.g. circulation_1d needs a PRO subscription)
      isRestricted       - whether time restrictions apply to the current plan
      restrictedFrom     - first datetime of the metric available on this plan
      restrictedTo       - last datetime of the metric available on this plan

    :param str indic_name: metric name
    :return: dict of the fields above, e.g.

        {'availableSlugs': ['0chain', '0x', '0xbtc', '0xcert', '1sg', ...],
        'defaultAggregation': 'AVG',
        'humanReadableName': 'NVT (Using Circulation)',
        'isAccessible': True,
        'isRestricted': True,
        'restrictedFrom': '2020-03-21T08:44:14Z',
        'restrictedTo': '2020-06-17T08:44:14Z'}
    """
    fields = ['availableSlugs', 'defaultAggregation', 'humanReadableName',
              'isAccessible', 'isRestricted', 'restrictedFrom', 'restrictedTo']
    return san.metadata(indic_name, arr=fields)


class sanbase_datas:
    """Fetches sanbase data through the raw GraphQL HTTP endpoint using the
    project's MyRequest helper and a hard-coded session cookie."""

    def __init__(self):
        # lazily created request helper; see get_ready()
        self.myRequest = None

        # self.headers = {
        #     'accept': 'application/json',
        #     'accept-encoding': 'gzip, deflate, br',
        #     'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh-TW;q=0.7,zh;q=0.6',
        #     'cache-control': 'no-cache',
        #     'content-type': 'application/json',
        #     'host': 'api.santiment.net',
        #     'origin': 'https://api.santiment.net',
        #     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        # }

        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        }

    def get_ready(self):
        """Create a fresh MyRequest instance before each fetch."""
        self.myRequest = MyRequest()

    @deco_retry(retry=5, retry_sleep=15)
    def sanbase_trans(self):
        """
        Run a fixed GraphQL query (sentiment_balance_bitcointalk for bitcoin)
        and return the decoded JSON response as a dict.

        :raises Exception: re-raises any request/decode error so that
                           @deco_retry can retry the call.
        """
        self.get_ready()
        # print("china_future_trans is starting...")

        try:
            cookies_url = "https://api.santiment.net"
            get_cookies = self.myRequest.get_cookie(cookies_url)
            # NOTE(review): the fetched cookies are immediately overridden by a
            # hard-coded session cookie — presumably deliberate (the endpoint
            # needs a logged-in _sanbase_sid); SECURITY: rotate this token and
            # load it from configuration rather than source.
            get_cookies = {
                '_sanbase_sid': 'SFMyNTY.g3QAAAADbQAAAAxhY2Nlc3NfdG9rZW5tAAABnmV5SmhiR2NpT2lKSVV6VXhNaUlzSW5SNWNDSTZJa3BYVkNKOS5leUpoZFdRaU9pSnpZVzUwYVcxbGJuUWlMQ0pqYkdsbGJuUWlPaUpEYUhKdmJXVWdPVFV1TUM0ME5qTTRMalk1SWl3aVpYaHdJam94TmpNM05qTTRNak0wTENKcFlYUWlPakUyTXpjMk16YzVNelFzSW1semN5STZJbk5oYm5ScGJXVnVkQ0lzSW1wMGFTSTZJakExTVdVeE5UZGhMVFV6TVRndE5ESmpNUzFoTURJNExUTXdOVFl5TldFeE1USmpZeUlzSW01aVppSTZNVFl6TnpZek56a3pNeXdpY0d4aGRHWnZjbTBpT2lKWGFXNWtiM2R6SURFd0lpd2ljM1ZpSWpvaU1UYzFNRGtpTENKMGVYQWlPaUpoWTJObGMzTWlmUS5QcUtoblVfNDVFQ2piTmtLU1lIbXc0OHRCaXpKdlpsQ1I1V05xY3hWSlp1SzFHS2lzaTE3TE03TllrdHdJdVRwY0hfbDJ3Y21qTVRkX1FiMVVIR2FPZ20AAAAKYXV0aF90b2tlbm0AAAGeZXlKaGJHY2lPaUpJVXpVeE1pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmhkV1FpT2lKellXNTBhVzFsYm5RaUxDSmpiR2xsYm5RaU9pSkRhSEp2YldVZ09UVXVNQzQwTmpNNExqWTVJaXdpWlhod0lqb3hOak0zTmpNNE1qTTBMQ0pwWVhRaU9qRTJNemMyTXpjNU16UXNJbWx6Y3lJNkluTmhiblJwYldWdWRDSXNJbXAwYVNJNklqQTFNV1V4TlRkaExUVXpNVGd0TkRKak1TMWhNREk0TFRNd05UWXlOV0V4TVRKall5SXNJbTVpWmlJNk1UWXpOell6Tnprek15d2ljR3hoZEdadmNtMGlPaUpYYVc1a2IzZHpJREV3SWl3aWMzVmlJam9pTVRjMU1Ea2lMQ0owZVhBaU9pSmhZMk5sYzNNaWZRLlBxS2huVV80NUVDamJOa0tTWUhtdzQ4dEJpekp2WmxDUjVXTnFjeFZKWnVLMUdLaXNpMTdMTTdOWWt0d0l1VHBjSF9sMndjbWpNVGRfUWIxVUhHYU9nbQAAAA1yZWZyZXNoX3Rva2VubQAAAZ9leUpoYkdjaU9pSklVelV4TWlJc0luUjVjQ0k2SWtwWFZDSjkuZXlKaGRXUWlPaUp6WVc1MGFXMWxiblFpTENKamJHbGxiblFpT2lKRGFISnZiV1VnT1RVdU1DNDBOak00TGpZNUlpd2laWGh3SWpveE5qTTVOemszT1RreExDSnBZWFFpT2pFMk16Y3pOemczT1RFc0ltbHpjeUk2SW5OaGJuUnBiV1Z1ZENJc0ltcDBhU0k2SW1SbU5XUTBZMlkzTFdNeU1HTXROR1JpTXkxaVl6UmlMVGMwTmpGbVltRTRNamhpWVNJc0ltNWlaaUk2TVRZek56TTNPRGM1TUN3aWNHeGhkR1p2Y20waU9pSlhhVzVrYjNkeklERXdJaXdpYzNWaUlqb2lNVGMxTURraUxDSjBlWEFpT2lKeVpXWnlaWE5vSW4wLjVZZ3p0aFBKVlVRQmVFMTNsVmREc0NGd09DUWVDMnRQWEF3X2pNYXpiRUVhVG5JYmFWeTU2cGlkTkxZamxkLVhrZ2VLQVE1V0VmblpTcWdwY2s4d1p3.KBpNKBbFwPfgyfowrLB8uXeZ91e_ZXZLwm716A9SHSA'}
            print(get_cookies)

            post_json = {
                'query': """{getMetric(metric: "sentiment_balance_bitcointalk"){
                                timeseriesData(
                                  slug: "bitcoin"
                                  from: "2020-02-10T07:00:00Z"
                                  to: "2021-11-23T07:00:00Z"
                                  interval: "1d"){
                                    datetime
                                    value
                                  }
                              }
                            }"""
            }

            url = "https://api.santiment.net/graphiql?"

            response_content = self.myRequest.post_dataJson_cookies(url, self.headers, post_json, get_cookies)
            response_content_dict = json.loads(response_content)

        except Exception as err:
            print("Fault: err: '%s'" % err)
            # Re-raise so @deco_retry sees the real failure. (Previously the
            # error was swallowed and `return response_content_dict` raised an
            # UnboundLocalError that masked the actual cause.)
            raise

        return response_content_dict


if __name__ == '__main__':
    # get_asset_list()
    # get_metrics_list()
    # get_metrics_list(asset='bitcoin')
    # metrics_list = get_social_metrics_list(asset='bitcoin')
    # get_top_social_gainer_loser()
    # get_indicators(indic_name='sentiment_balance_bitcointalk', asset='bitcoin', start_date='2015-01-01', end_date='2021-11-20', interval='1d')
    # get_indicator_info(indic_name='social_volume_twitter')

    # res = []
    # for indic in get_social_metrics_list(asset='bitcoin'):
    #     a = get_indicators(indic_name=indic, asset='bitcoin', start_date='2015-01-01', end_date='2021-11-16', interval='1d')
    #     res.append(a)
    # res = pd.concat(res, axis=1)
    # res

    # Example fetch over HTTP; NOTE(review): aaa is None if the response can't
    # be parsed, in which case .to_csv raises AttributeError.
    aaa = get_indicators_via_http(indic_name='social_dominance_reddit_1h_moving_average', asset='bitcoin',
                                  start_date='2021-12-07',
                                  end_date='2021-12-09', interval='5m')
    aaa.to_csv('social_dominance_reddit_1h_moving_average_1d.csv')

    # %% manually saving data
    # Open https://api.santiment.net/graphiql, run the query below, then paste
    # the JSON response into `data`, e.g.:
    # {
    #     getMetric(metric: "unique_social_volume_total_5m") {
    #         timeseriesData(slug: "bitcoin", from: "2015-01-01T07:00:00Z", to: "2021-11-14T07:00:00Z", interval: "1d") {
    #         datetime
    #         value
    #         }
    #     }
    # }
    indic_name = 'unique_social_volume_total_5m'
    # NOTE(review): `data = {}` is a placeholder for the manually pasted
    # response; as written the next line raises KeyError('data').
    data = {}
    data = data['data']['getMetric']['timeseriesData']
    columns = list(set(data[0].keys()))
    df = pd.DataFrame.from_records(data, columns=columns)
    df.rename(columns={'datetime': 'end_date'}, inplace=True)
    df['end_date'] = df['end_date'].apply(lambda x: x[:10])
    df = df.iloc[:-1, :]  # drop the last (possibly partial) row
    df.set_index('end_date', inplace=True)
    if len(df.columns) == 1:
        df.columns = [f'{indic_name}']
    else:
        print(f'指标{indic_name}数据为多列，无法对指标列重命名')
    print(f'指标{indic_name}数据获取成功')
    file_path = os.path.join(DATA_DIR, f'social_data')
    os.makedirs(file_path, exist_ok=True)
    file_name = os.path.join(file_path, f'{indic_name}')
    df.to_csv(f'{file_name}.csv')
