# _*_ coding:utf-8 _*_
# @File  : futures_daily_rank.py
# @Time  : 2021-06-25  15:05
# @Author: zizle
import json
import os
import re
import time
import zipfile

import requests
import datetime
import pandas as pd
import numpy as np
from hutool import utils as hu_tool
from logger import logger


class CZCEDailyRank(object):
    """Download and parse CZCE's daily trader-ranking table.

    CZCE publishes one ``FutureDataHolding.xls`` file per trading day.  The
    sheet stacks, per variety and per contract, the top-20 members ranked by
    traded volume, long open interest and short open interest; each sub-table
    is introduced by a "品种:"/"合约:" header row and closed by a "合计"
    (totals) row.
    """

    def __init__(self, parser_date: datetime.datetime):
        self.parser_year = parser_date.strftime('%Y')
        self.parser_day = parser_date.strftime('%Y%m%d')
        self.url = f"http://www.czce.com.cn/cn/DFSStaticFiles/Future/{self.parser_year}/{self.parser_day}/FutureDataHolding.xls"
        self.headers = {
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        self.temp_folder = 'temp/czce/rank'
        self.filepath = f'{self.temp_folder}/{self.parser_day}.xls'
        if not os.path.exists(self.temp_folder):
            os.makedirs(self.temp_folder)

    def get_rank_xls_file(self):
        """Download the day's ranking workbook into the local cache.

        On a non-200 response any stale cached file for the same day is
        removed so a later parse does not silently reuse outdated data.
        """
        try:
            # FIX: a timeout keeps the spider from hanging forever on a dead connection.
            r = requests.get(url=self.url, headers=self.headers, timeout=30)
            if r.status_code == 200:
                with open(self.filepath, 'wb') as f:
                    f.write(r.content)
                # FIX: report success only when the file was actually saved
                # (previously success was logged even for non-200 responses).
                logger.info(f'Spider {self.parser_day} CZCE rank Successfully!')
            else:
                if os.path.exists(self.filepath):
                    os.remove(self.filepath)
        except Exception as e:
            logger.error(f'Spider {self.parser_day} CZCE rank Error:{e}')

    def get_daily_rank(self):
        """Parse the cached workbook and return ``(records, message)``.

        ``records`` is a list of dicts keyed by the unified schema
        (variety_en/contract/rank plus company/amount/increase triples for
        the trade, long and short sides).
        """
        if not os.path.exists(self.filepath):
            self.get_rank_xls_file()
        # thousands=',' strips the separators inside numeric cells.
        xls_df = pd.read_excel(self.filepath, thousands=',')
        variety_index_dict = dict()  # variety code -> [start_row, end_row]
        contract_index_dict = dict()  # contract code -> [start_row, end_row]
        variety_dict = dict()  # variety code -> Chinese name
        contract_set = set()  # all contract codes seen
        variety_en = None
        contract_en = None
        is_variety = True
        # Walk every row once to locate each variety/contract sub-table.
        for row_content in xls_df.itertuples():
            info_for_match_ = hu_tool.full_width_to_half_width(str(row_content[1]))
            search_variety = re.search(r'品种:(.*)\s日期.*', info_for_match_)  # variety header row
            search_contract = re.search(r'合约:(.*)\s日期.*', info_for_match_)  # contract header row
            search_sum = re.search(r'合计', info_for_match_)  # totals row closes a sub-table
            if search_variety:  # record the variety and its start row
                zh_en_variety = search_variety.group(1)
                variety_name, variety_en = hu_tool.split_zh_en(zh_en_variety)
                if variety_en == "PTA":
                    # Normalize PTA to its 2-letter contract prefix TA.
                    variety_en = "TA"
                variety_dict[variety_en] = variety_name
                variety_index_dict[variety_en] = [row_content[0] + 1]
                is_variety = True
            elif search_contract:  # record the contract and its start row
                contract_en = search_contract.group(1)
                contract_set.add(contract_en)
                contract_index_dict[contract_en] = [row_content[0] + 1]
                is_variety = False
            if search_sum and (variety_en or contract_en):
                # Totals row: close the sub-table that is currently open.
                if is_variety:
                    variety_index_dict[variety_en].append(row_content[0])
                else:
                    contract_index_dict[contract_en].append(row_content[0])
        # Assemble the unified result frame.
        column_indexes = ['variety_en', 'contract', 'rank',
                          'trade_company', 'trade', 'trade_increase',
                          'long_position_company', 'long_position', 'long_position_increase',
                          'short_position_company', 'short_position', 'short_position_increase']

        result_df = pd.DataFrame(columns=column_indexes)
        # One sub-frame per variety; its contract column holds the variety code.
        for variety_en in variety_dict:
            variety_index_range = variety_index_dict[variety_en]
            variety_df = xls_df.iloc[variety_index_range[0]:variety_index_range[1] + 1, :]
            variety_df = self._parser_rank_sub_df(variety_name=variety_dict[variety_en], sub_df=variety_df)
            variety_df["variety_en"] = [variety_en for _ in range(variety_df.shape[0])]
            variety_df["contract"] = [variety_en for _ in range(variety_df.shape[0])]
            result_df = pd.concat([result_df, variety_df])
        # One sub-frame per contract.
        for contract in contract_set:
            contract_index_range = contract_index_dict[contract]
            # CZCE contract codes start with a 2-letter variety prefix
            # (PTA was already normalized to TA above).
            variety_key = contract[:2]
            contract_df = xls_df.iloc[contract_index_range[0]:contract_index_range[1] + 1, :]
            contract_df = self._parser_rank_sub_df(variety_name=variety_dict[variety_key], sub_df=contract_df)
            contract_df["variety_en"] = [variety_key for _ in range(contract_df.shape[0])]
            target_contract = hu_tool.modify_contract_express(contract.strip(), self.parser_day)
            contract_df["contract"] = [target_contract for _ in range(contract_df.shape[0])]
            result_df = pd.concat([result_df, contract_df])
        rank_data = result_df.to_dict(orient='records')
        return rank_data, f'get {self.parser_day} CZCE rank Successfully!'

    def _parser_rank_sub_df(self, variety_name, sub_df):
        """Normalize one variety/contract sub-table into the unified schema.

        ``variety_name`` is currently unused but kept for interface
        compatibility with callers.  Returns an empty DataFrame when the
        sub-table's header row does not match the expected layout.
        """
        column_indexes = sub_df.iloc[0].values.tolist()
        if column_indexes != ['名次', '会员简称', '成交量（手）', '增减量', '会员简称', '持买仓量', '增减量', '会员简称', '持卖仓量', '增减量']:
            logger.error(f'Parser {self.parser_day} CZCE Rank Error: columns error!')
            return pd.DataFrame()
        column_indexes = ['名次', '成交会员', '成交量', '成交增减', '买仓会员', '买仓量', '买仓增减', '卖仓会员', '卖仓量', '卖仓增减']
        sub_df.columns = column_indexes
        sub_df = sub_df.drop(sub_df.index[0])  # drop the header row
        # Remove the totals ("合计") row.
        sub_df['名次'] = sub_df['名次'].apply(lambda x: str(x))
        sub_df = sub_df[~sub_df['名次'].str.contains('合计')]
        # Numeric columns: '-' marks a blank cell and counts as 0.
        sub_df[['名次', '成交量', '成交增减', '买仓量', '买仓增减', '卖仓量', '卖仓增减']] = sub_df[['名次', '成交量', '成交增减', '买仓量', '买仓增减', '卖仓量', '卖仓增减']].replace('-', 0).astype(int)
        # Prepend two empty columns for variety code and contract;
        # the caller fills them in afterwards.
        new_column_indexes = ["品种代码", "合约"] + column_indexes
        sub_df = sub_df.reindex(columns=new_column_indexes)
        reset_indexes = ['variety_en', 'contract', 'rank',
                         'trade_company', 'trade', 'trade_increase',
                         'long_position_company', 'long_position', 'long_position_increase',
                         'short_position_company', 'short_position', 'short_position_increase']
        sub_df.columns = reset_indexes  # rename to the unified schema
        return sub_df


class SHFEDailyRank(object):
    """Download and parse SHFE's daily trader-ranking JSON feed.

    SHFE serves one ``pmYYYYMMDD.dat`` JSON file per trading day whose
    ``o_cursor`` array holds, per instrument, the top-20 members by traded
    volume, long open interest and short open interest.
    """

    def __init__(self, parser_date: datetime.datetime):
        self.parser_year = parser_date.strftime('%Y')
        self.parser_day = parser_date.strftime('%Y%m%d')
        self.url = f"http://www.shfe.com.cn/data/dailydata/kx/pm{self.parser_day}.dat"
        self.headers = {
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        self.temp_folder = 'temp/shfe/rank'
        self.filepath = f'{self.temp_folder}/{self.parser_day}.json'
        if not os.path.exists(self.temp_folder):
            os.makedirs(self.temp_folder)

    def get_rank_json_file(self):
        """Download the day's ranking JSON into the local cache.

        On failure any stale cached file for the same day is removed so a
        later parse does not reuse outdated data.
        """
        try:
            # FIX: a timeout keeps the spider from hanging forever on a dead connection.
            r = requests.get(url=self.url, headers=self.headers, timeout=30)
            if r.status_code == 200:
                # FIX: parse JSON only after the status check; the original
                # called r.json() first, so an HTML error page raised and the
                # stale-cache cleanup below never ran.
                content = r.json()
                with open(self.filepath, 'w', encoding='utf8') as f:
                    json.dump(content, f)
                logger.info(f'Spider {self.parser_day} SHFE rank Successfully!')
            else:
                if os.path.exists(self.filepath):
                    os.remove(self.filepath)
        except Exception as e:
            logger.error(f'Spider {self.parser_day} SHFE rank Error:{e}')

    def get_daily_rank(self):
        """Parse the cached JSON and return ``(records, message)``."""
        if not os.path.exists(self.filepath):
            self.get_rank_json_file()
        with open(self.filepath, "r", encoding="utf-8") as reader:
            source_content = json.load(reader)
        json_df = pd.DataFrame(source_content["o_cursor"])
        # Keep ranks 1..20 (-1: futures-company total, 0: non-futures-company
        # total, 999: variety/contract grand total).
        json_df = json_df[(json_df["RANK"] >= 1) & (json_df['RANK'] <= 20)]
        # Normalize strings: strip whitespace; drop the 'ALL' marker used for
        # variety-level rows so only the variety code remains.
        json_df["INSTRUMENTID"] = json_df["INSTRUMENTID"].str.strip().str.upper().str.replace("ALL", '')
        json_df["PARTICIPANTABBR1"] = json_df["PARTICIPANTABBR1"].str.strip()
        json_df["PARTICIPANTABBR2"] = json_df["PARTICIPANTABBR2"].str.strip()
        json_df["PARTICIPANTABBR3"] = json_df["PARTICIPANTABBR3"].str.strip()
        # Blank cells -> NaN -> 0.
        # FIX: raw string for the regex (avoids the invalid-escape warning).
        json_df.replace(to_replace=r"^\s*$", value=np.nan, regex=True, inplace=True)
        json_df = json_df.fillna(0)
        # Derive the variety code from the instrument id.
        json_df["VARIETYEN"] = json_df["INSTRUMENTID"].apply(hu_tool.split_number_en).apply(lambda x: x[0].upper())
        # Reorder then rename the columns to the unified schema.
        json_df = json_df.reindex(columns=["VARIETYEN", "INSTRUMENTID", "RANK", "PARTICIPANTABBR1", "CJ1", "CJ1_CHG", "PARTICIPANTABBR2", "CJ2", "CJ2_CHG", "PARTICIPANTABBR3", "CJ3", "CJ3_CHG"])
        json_df.columns = ["variety_en", "contract", "rank",
                           "trade_company", "trade", "trade_increase",
                           "long_position_company", "long_position", "long_position_increase",
                           "short_position_company", "short_position", "short_position_increase"]
        if not json_df.empty:
            # Cast amount columns to int; company columns that were filled
            # with 0 (originally blank) are shown as '-'.
            json_df["rank"] = json_df["rank"].astype("int")
            json_df["trade_company"] = json_df["trade_company"].apply(lambda x: '-' if x == 0 else x)
            json_df["trade"] = json_df["trade"].astype("int")
            json_df["trade_increase"] = json_df["trade_increase"].astype("int")
            json_df["long_position_company"] = json_df["long_position_company"].apply(lambda x: '-' if x == 0 else x)
            json_df["long_position"] = json_df["long_position"].astype("int")
            json_df["long_position_increase"] = json_df["long_position_increase"].astype("int")
            json_df["short_position_company"] = json_df["short_position_company"].apply(lambda x: '-' if x == 0 else x)
            json_df["short_position"] = json_df["short_position"].astype("int")
            json_df["short_position_increase"] = json_df["short_position_increase"].astype("int")
        rank_data = json_df.to_dict(orient='records')
        return rank_data, f'get {self.parser_day} SHFE rank Successfully!'


class DCEDailyRank(object):
    """Download and parse DCE's daily trader-ranking data.

    DCE serves a zip archive containing one text file per contract; each
    file lists the top-20 members in three consecutive sections (traded
    volume, long open interest, short open interest).
    """

    def __init__(self, parser_date: datetime.datetime):
        self.parser_year = parser_date.strftime('%Y')
        self.parser_day = parser_date.strftime('%Y%m%d')
        self.url = "http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html"
        self.form_params = {
            'memberDealPosiQuotes.variety': 'a',
            'memberDealPosiQuotes.trade_type': '0',
            'year': str(parser_date.year),
            'month': str(parser_date.month - 1),  # DCE's form expects a zero-based month
            'day': parser_date.strftime("%d"),
            'contract.contract_id': 'a2009',
            'contract.variety_id': 'a',
            'batchExportFlag': 'batch'
        }
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        self.temp_folder = 'temp/dce/rank'
        self.filepath = f'{self.temp_folder}/{self.parser_day}.zip'
        if not os.path.exists(self.temp_folder):
            os.makedirs(self.temp_folder)

    def get_rank_xls_file(self):
        """Download the day's ranking zip archive into the local cache."""
        try:
            # FIX: a timeout keeps the spider from hanging forever on a dead connection.
            r = requests.post(url=self.url, data=self.form_params, headers=self.headers, timeout=60)
            if r.status_code == 200:
                with open(self.filepath, 'wb') as f:
                    f.write(r.content)
                # FIX: report success only when the archive was actually saved
                # (previously success was logged even for non-200 responses).
                logger.info(f'Spider {self.parser_day} DCE rank Successfully!')
            else:
                # Drop any stale cached archive on failure.
                if os.path.exists(self.filepath):
                    os.remove(self.filepath)
        except Exception as e:
            logger.error(f'Spider {self.parser_day} DCE rank Error:{e}')

    def get_daily_rank(self):
        """Unzip the cached archive, parse every contract file and return
        ``(records, message)``."""
        if not os.path.exists(self.filepath):
            self.get_rank_xls_file()
        # FIX: os.path.join discards all components preceding an absolute one,
        # so the original '/cache/{}/' argument resolved to the filesystem
        # root ('/cache/...') instead of a folder under temp_folder.
        cache_folder = os.path.join(self.temp_folder, 'cache', self.parser_day)
        # Extract every archive member into the cache directory.
        zip_file = zipfile.ZipFile(self.filepath)
        zip_list = zip_file.namelist()
        for filename in zip_list:
            # NOTE: re-encoding the name via cp437->gbk broke extraction, so
            # member names are used as-is.
            zip_file.extract(filename, cache_folder)
        zip_file.close()
        # Parse every extracted file into the unified DataFrame.
        value_df = self._parser_variety_rank(cache_folder)
        if not value_df.empty:
            # The outer merges can leave gaps; '-' marks "no data" and is
            # converted to 0 by str_to_int below.
            value_df = value_df.fillna('-')
            value_df["rank"] = value_df["rank"].apply(self.str_to_int)
            value_df["trade"] = value_df["trade"].apply(self.str_to_int)
            value_df["trade_increase"] = value_df["trade_increase"].apply(self.str_to_int)
            value_df["long_position"] = value_df["long_position"].apply(self.str_to_int)
            value_df["long_position_increase"] = value_df["long_position_increase"].apply(self.str_to_int)
            value_df["short_position"] = value_df["short_position"].apply(self.str_to_int)
            value_df["short_position_increase"] = value_df["short_position_increase"].apply(self.str_to_int)
        rank_data = value_df.to_dict(orient='records')
        return rank_data, 'Parser DCE-Rank Successfully!'

    def str_to_int(self, ustring):
        """Convert a thousands-separated numeric string to int; '-' means 0."""
        if ustring == "-":
            return 0
        return int(ustring.replace(',', ''))

    def _parser_variety_rank(self, cache_folder):
        """Parse every contract file in ``cache_folder`` into one DataFrame.

        File names look like ``<date>_<contract>_...``; each file holds three
        sections (trade / long / short) switched by their header rows.
        """
        all_data_df = pd.DataFrame(columns=["date", "variety_en", "contract", "rank",
                                            "trade_company", "trade", "trade_increase",
                                            "long_position_company", "long_position", "long_position_increase",
                                            "short_position_company", "short_position", "short_position_increase"])

        filename_list = os.listdir(cache_folder)
        for contract_filename in filename_list:
            contract_file_path = os.path.join(cache_folder, contract_filename)
            message_list = contract_filename.split('_')
            c_date = message_list[0]  # trade date from the file name
            contract = message_list[1].upper()  # contract code
            variety_en = hu_tool.split_number_en(message_list[1])[0].upper()  # variety code
            if self.parser_day < '20160101':  # files before 2016-01-01 are gbk encoded
                contract_df = pd.read_table(contract_file_path, encoding="gbk")
            else:  # later files are utf8, space separated
                contract_df = pd.read_table(contract_file_path, sep=' ')
            # Three collectors, one per section, toggled by the header rows.
            trade_values = []
            trade_append = False
            long_position_values = []
            long_position_append = False
            short_position_values = []
            short_position_append = False
            for df_row in contract_df.itertuples():
                row_list = df_row[1].split()
                # Skip totals and the category rows added after 20131126
                # ("会员类别", "期货公司会员", "非期货公司会员").
                if row_list[0] in ["总计", "会员类别", "期货公司会员",
                                   "非期货公司会员"]:
                    continue
                # Section headers select the target collector.
                if row_list == ['名次', '会员简称', '成交量', '增减']:
                    trade_append = True
                    long_position_append = False
                    short_position_append = False
                    continue
                if row_list == ['名次', '会员简称', '持买单量', '增减']:
                    trade_append = False
                    long_position_append = True
                    short_position_append = False
                    continue
                if row_list == ['名次', '会员简称', '持卖单量', '增减']:
                    trade_append = False
                    long_position_append = False
                    short_position_append = True
                    continue
                if trade_append and 1 <= int(row_list[0]) <= 20:
                    trade_values.append(
                        {
                            "date": c_date,
                            "contract": contract,
                            "variety_en": variety_en,
                            "rank": row_list[0],
                            "trade_company": row_list[1],
                            "trade": row_list[2],
                            "trade_increase": row_list[3]
                        }
                    )
                if long_position_append and 1 <= int(row_list[0]) <= 20:
                    long_position_values.append(
                        {
                            "date": c_date,
                            "contract": contract,
                            "variety_en": variety_en,
                            "rank": row_list[0],
                            "long_position_company": row_list[1],
                            "long_position": row_list[2],
                            "long_position_increase": row_list[3]
                        }
                    )
                if short_position_append and 1 <= int(row_list[0]) <= 20:
                    short_position_values.append(
                        {
                            "date": c_date,
                            "contract": contract,
                            "variety_en": variety_en,
                            "rank": row_list[0],
                            "short_position_company": row_list[1],
                            "short_position": row_list[2],
                            "short_position_increase": row_list[3]
                        }
                    )
            # Build the three section frames and merge them side by side on
            # (date, contract, variety_en, rank).
            columns_list = ["date", "contract", "variety_en", "rank"]
            trade_df = pd.DataFrame(trade_values, columns=columns_list + ["trade_company", "trade", "trade_increase"])
            long_position_df = pd.DataFrame(long_position_values,
                                            columns=columns_list + ["long_position_company", "long_position",
                                                                    "long_position_increase"])
            short_position_df = pd.DataFrame(short_position_values,
                                             columns=columns_list + ["short_position_company", "short_position",
                                                                     "short_position_increase"])
            contract_result_df = pd.merge(
                trade_df, long_position_df,
                on=columns_list,
                how="outer"
            )
            contract_result_df = pd.merge(
                contract_result_df,
                short_position_df,
                on=columns_list,
                how="outer"
            )
            # Stack this contract under the accumulated result.
            all_data_df = pd.concat([all_data_df, contract_result_df])
        return all_data_df


class CFFEXDailyRank(object):
    """Download and parse CFFEX's daily per-variety trader-ranking csv files.

    One csv per variety per day; each holds the top-20 members by traded
    volume, long open interest and short open interest side by side.
    """

    def __init__(self, parser_date: datetime.datetime):
        self.parser_date = parser_date
        self.parser_year = parser_date.strftime('%Y')
        self.parser_month = parser_date.strftime("%Y%m")
        self.parser_day = parser_date.strftime('%Y%m%d')
        self.variety_list = self.get_variety_list()
        self.base_url = "http://www.cffex.com.cn/sj/ccpm/{}/{}/{}_1.csv"
        self.headers = {
            'Host': 'www.cffex.com.cn',
            'Referer': 'http://www.cffex.com.cn/ccpm/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        self.temp_folder = 'temp/cffex/rank'
        if not os.path.exists(self.temp_folder):
            os.makedirs(self.temp_folder)

    def get_variety_list(self):
        """Return the varieties listed on CFFEX as of the parsed date,
        following the exchange's historical listing schedule."""
        if "20100416" <= self.parser_day < "20130906":
            variety_list = ["IF"]
        elif "20130906" <= self.parser_day < "20150320":
            variety_list = ["IF", "TF"]
        elif "20150320" <= self.parser_day < "20150416":
            variety_list = ["IF", "TF", "T"]
        elif "20150416" <= self.parser_day < "20180816":
            variety_list = ["IF", "TF", "T", "IC", "IH"]
        else:
            variety_list = ["IF", "TF", "T", "IC", "IH", "TS"]
        return variety_list

    def get_rank_csv_file(self, variety_en):
        """Download one variety's daily ranking csv into the local cache."""
        url = self.base_url.format(self.parser_month, self.parser_date.strftime("%d"), variety_en)
        filepath = f'{self.temp_folder}/{self.parser_day}_{variety_en}.csv'
        try:
            # FIX: a timeout keeps the spider from hanging forever on a dead connection.
            r = requests.get(url=url, headers=self.headers, timeout=30)
            if r.status_code == 200:
                with open(filepath, 'wb') as f:
                    f.write(r.content)
                # FIX: report success only when the file was actually saved
                # (previously success was logged even for non-200 responses).
                logger.info(f'Spider {self.parser_day}_{variety_en} CFFEX rank Successfully!')
            else:
                if os.path.exists(filepath):
                    os.remove(filepath)
        except Exception as e:
            logger.error(f'Spider {self.parser_day}_{variety_en} CFFEX rank Error:{e}')

    def get_daily_rank(self):
        """Download (if needed) and parse every listed variety; return
        ``(records, message)``.  If any variety fails to parse, the whole
        day is discarded (partial days are not useful)."""
        result_df = pd.DataFrame(columns=[])
        for variety in self.variety_list:
            filepath = f'{self.temp_folder}/{self.parser_day}_{variety}.csv'
            if not os.path.exists(filepath):
                time.sleep(1)  # be polite to the exchange server between downloads
                self.get_rank_csv_file(variety)
            variety_df, success = self.parser_variety_rank_file(filepath, variety)
            if not success:
                # Drop the bad file and abort with an empty result.
                if os.path.exists(filepath):
                    os.remove(filepath)
                result_df = pd.DataFrame(columns=[])
                break
            result_df = pd.concat([result_df, variety_df])
        if not result_df.empty:
            # Cast the numeric columns to int.
            result_df["rank"] = result_df["rank"].astype("int")
            result_df["trade"] = result_df["trade"].astype("int")
            result_df["trade_increase"] = result_df["trade_increase"].astype("int")
            result_df["long_position"] = result_df["long_position"].astype("int")
            result_df["long_position_increase"] = result_df["long_position_increase"].astype("int")
            result_df["short_position"] = result_df["short_position"].astype("int")
            result_df["short_position_increase"] = result_df["short_position_increase"].astype("int")
        rank_data = result_df.to_dict(orient='records')
        return rank_data, f'get {self.parser_day} CFFEX rank Successfully!'

    def parser_variety_rank_file(self, file_path, variety_name):
        """Parse one variety's ranking csv; return ``(DataFrame, success)``."""
        try:
            variety_df = pd.read_csv(file_path, encoding="gbk", header=None, sep="\t", thousands=',')
        except Exception:
            logger.error(f'Parser CFFEX {self.parser_day}_{variety_name} Error: ParserError!')
            return pd.DataFrame(), False
        # Collect the raw rows of the ranking table (header row first).
        variety_ranks = []
        equal_list = ['交易日', '合约', '排名', '成交量排名', '', '', '持买单量排名', '', '', '持卖单量排名', '', '']
        start_get, first_enter = False, False
        for row in variety_df.itertuples():
            row_list = row[1].split(',')
            if row_list == equal_list:
                start_get = True
                first_enter = True
                continue
            if start_get and first_enter:
                # The first row after the section banner carries the column
                # names; disambiguate the three duplicate '会员简称' columns
                # with numeric suffixes.
                row_list[0:3] = equal_list[0:3]  # '交易日', '合约', '排名'
                row_list[3] = row_list[3] + "1"
                row_list[6] = row_list[6] + "2"
                row_list[9] = row_list[9] + "3"
                first_enter = False
                variety_ranks.append(row_list)
                continue
            if start_get and not first_enter:
                variety_ranks.append(row_list)
        # FIX: guard against a file with no recognizable header, which
        # previously raised IndexError on variety_ranks[0].
        if not variety_ranks:
            logger.error(f"Parser CFFEX {variety_name} rank Error: columns error!")
            return pd.DataFrame(), False
        variety_df = pd.DataFrame(variety_ranks[1:], columns=variety_ranks[0])
        df_columns = ['交易日', '合约', '排名', '会员简称1', '成交量', '比上一交易日增减', '会员简称2', '持买单量', '比上一交易日增减', '会员简称3', '持卖单量',
                      '比上一交易日增减']
        if variety_df.columns.tolist() != df_columns:
            logger.error(f"Parser CFFEX {variety_name} rank Error: columns error!")
            return pd.DataFrame(), False
        # 1. Rename to the unified schema.
        variety_df.columns = ["date", "contract", "rank", "trade_company", "trade", "trade_increase",
                              "long_position_company", "long_position", "long_position_increase",
                              "short_position_company", "short_position", "short_position_increase"]
        # 2. Strip embedded whitespace from every cell.
        # FIX: raw string for the regex (avoids the invalid-escape warning).
        variety_df = variety_df.replace(r'\s+', '', regex=True)
        # 3. Insert the variety column.
        variety_df["variety_en"] = [variety_name for _ in range(variety_df.shape[0])]
        # 4. Reorder the columns.
        variety_df = variety_df.reindex(columns=["date", "variety_en", "contract", "rank",
                                                 "trade_company", "trade", "trade_increase",
                                                 "long_position_company", "long_position", "long_position_increase",
                                                 "short_position_company", "short_position", "short_position_increase"])
        return variety_df, True


if __name__ == '__main__':
    # Target the trading day three days back to allow for publication lag.
    target_date = datetime.datetime.today() - datetime.timedelta(days=3)
    # CZCEDailyRank, SHFEDailyRank and DCEDailyRank expose the same
    # get_daily_rank() interface and can be swapped in here.
    spider = CFFEXDailyRank(target_date)
    spider.get_daily_rank()




