#  -*- coding: utf-8 -*-
import sys
sys.path.append('/Users/xbs/Code/HunterQuant')
from pymongo import UpdateOne,ASCENDING
from util.database import DB_CONN
from util.stock_util import get_trading_dates
from datetime import datetime, timedelta
from data.daily_crawler import DailyCrawler
import time,os
import pandas as pd
from pathlib import Path
from util.database import base_data_path

"""
从tushare获取股票基础数据，保存到本地的MongoDB数据库中
"""

# Directories holding local no-fq (unadjusted) export files, one "<code>.txt"
# per index / per stock; the first line of each file carries the security name.
index_nofq_path = f"{base_data_path}/index_nofq"
code_nofq_path = f"{base_data_path}/code_nofq"

class BasicCrawler:
    """Crawl basic stock data and persist it into local MongoDB collections."""

    # Collection short names; each is exposed as an attribute "<name>_db".
    _COLLECTIONS = ('basic', 'stable', 'index_cons',
                    'ts_code_stable', 'ts_code_daily', 'block')

    def __init__(self):
        # Bind one handle per MongoDB collection used by this crawler.
        for coll in self._COLLECTIONS:
            setattr(self, f'{coll}_db', DB_CONN[coll])


    @staticmethod
    def trans_ts_trade_code(trade_date):
        date = f"{trade_date[0:4]}-{trade_date[4:6]}-{trade_date[6:8]}"
        return date

    # def crawl_ts_daily_basic(self,begin_date=None,end_date=None):
    #     try:
    #         if begin_date is None:
    #             begin_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    #
    #         if end_date is None:
    #             end_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    #
    #         all_dates = get_trading_dates(begin_date, end_date)
    #
    #         self.ts_code_daily_db.create_index([('code', 1), ('date', 1)])
    #
    #         for date in all_dates:
    #             start_time = time.time()
    #             ts_date = date.replace("-","").strip()
    #             ts_daily_basic_df = ts_pro.daily_basic(trade_date=ts_date)
    #
    #             if 0 == ts_daily_basic_df.shape[0]:
    #                 print(f"{date} 没有获取到tushare原始数据。")
    #                 continue
    #             #if ts_daily_basic_df.count()
    #             ts_daily_basic_df.insert(2, 'code', ts_daily_basic_df['ts_code'].str.slice(0,6))
    #             ts_daily_basic_df['trade_date'] = ts_daily_basic_df['trade_date'].apply(lambda x:self.trans_ts_trade_code(x))
    #             ts_daily_basic_df.rename(columns=({'trade_date': 'date'}),inplace=True)
    #
    #             ts_daily_basic_list = ts_daily_basic_df.to_dict(orient='records')
    #
    #             update_requests = list()
    #             for item in ts_daily_basic_list:
    #
    #                 code_cursor = self.ts_code_daily_db.find(
    #                     {"code": item['code'],"date":item['date']},
    #                     projection={'_id': False})
    #
    #                 # 在已有数据库里找不到的情况下，才去插入新的数据
    #                 if 0 == code_cursor.count():
    #                     update_requests.append(
    #                         UpdateOne(
    #                             {'code': item['code'],"date":item['date']},
    #                             {'$set': item}, upsert=True))
    #
    #             if len(update_requests) > 0:
    #                 update_result = self.ts_code_daily_db.bulk_write(update_requests, ordered=False)
    #                 end_time = time.time()
    #                 print(f'抓取tushare股票每日指标信息，日期：{date}, 插入：{update_result.upserted_count}条，'
    #                       f'更新：{update_result.modified_count}条,耗时: {(end_time - start_time)}秒')
    #
    #     except Exception as e:
    #         print(f"crawl_ts_daily_basic error info : {e}")
    #
    #     finally:
    #         return

    # def crawl_basic(self, begin_date=None, end_date=None):
    #     """
    #     抓取指定时间范围内的股票基础信息
    #     :param begin_date: 开始日期
    #     :param end_date: 结束日期
    #     """
    #
    #     if begin_date is None:
    #         begin_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    #
    #     if end_date is None:
    #         end_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    #
    #     all_dates = get_trading_dates(begin_date, end_date)
    #     self.basic_db.create_index([('code', 1), ('date', 1)])
    #
    #     for date in all_dates:
    #         try:
    #             self.crawl_basic_at_date(date)
    #         except:
    #             print('抓取股票基本信息时出错，日期：%s' % date, flush=True)
    #
    #
    # def crawl_basic_at_date(self, date):
    #     """
    #     从Tushare抓取指定日期的股票基本信息
    #     :param date: 日期
    #     """
    #     # 默认推送上一个交易日的数据
    #     start_time = time.time()
    #     df_basics = ts.get_stock_basics(date)
    #
    #     # 如果当日没有基础信息，在不做操作
    #     if df_basics is None:
    #         return
    #
    #     update_requests = []
    #     codes = set(df_basics.index)
    #     for code in codes:
    #         doc = dict(df_basics.loc[code])
    #         try:
    #             # 将20180101转换为2018-01-01的形式
    #             time_to_market = datetime\
    #                 .strptime(str(doc['timeToMarket']), '%Y%m%d')\
    #                 .strftime('%Y-%m-%d')
    #
    #             # 解决流通股本和总股本单位不一致的情况
    #             totals = float(doc['totals'])
    #             # 这里假设最大规模的股本不超过5000亿，股本规模的最大工商银行是3564亿
    #             if totals > 5000:
    #                 totals *= 1E4
    #             else:
    #                 totals *= 1E8
    #
    #             outstanding = float(doc['outstanding'])
    #             # 这里假设最大规模的股本不超过5000亿，股本规模的最大工商银行是3564亿
    #             if outstanding > 5000:
    #                 outstanding *= 1E4
    #             else:
    #                 outstanding *= 1E8
    #
    #             # 保存时增加date字段，因为明天都会有一条数据
    #             doc.update({
    #                 'code': code,
    #                 'date': date,
    #                 'timeToMarket': time_to_market,
    #                 'outstanding': outstanding,
    #                 'totals': totals
    #             })
    #
    #             update_requests.append(
    #                 UpdateOne(
    #                     {'code': code, 'date': date},
    #                     {'$set': doc}, upsert=True))
    #         except:
    #             print('发生异常，股票代码：%s，日期：%s' % (code, date), flush=True)
    #             print(doc, flush=True)
    #
    #     if len(update_requests) > 0:
    #         update_result = self.basic_db.bulk_write(update_requests, ordered=False)
    #         end_time = time.time()
    #         print('抓取股票基本信息，日期：%s, 插入：%4d条，更新：%4d条,耗时: %.3f 秒' %
    #               (date, update_result.upserted_count, update_result.modified_count,(end_time - start_time)), flush=True)
    #
    #     return

    # def crawl_ts_code_stable_data(self):
    #     try:
    #         start_time = time.time()
    #         stable_df = ts_pro.stock_basic()
    #         self.ts_code_stable_db.create_index([('code', 1)])
    #
    #         stable_df.rename(columns=({'symbol': 'code'}),inplace=True)
    #
    #         stable_list = stable_df.to_dict(orient='records')
    #         update_requests = list()
    #         for item in stable_list:
    #
    #             code_cursor = self.ts_code_stable_db.find(
    #                 {"code": item['code']},
    #                 projection={'_id': False})
    #
    #             #如果code在已有数据库里找不到的情况下，才去插入新的数据
    #             if 0 == code_cursor.count():
    #                 update_requests.append(
    #                     UpdateOne(
    #                         {'code': item['code']},
    #                         {'$set': item}, upsert=True))
    #
    #         if len(update_requests) > 0:
    #             update_result = self.ts_code_stable_db.bulk_write(update_requests, ordered=False)
    #             end_time = time.time()
    #             print(f'抓取tushare股票列表固定信息， 插入：{update_result.upserted_count}条，'
    #                   f'更新：{update_result.modified_count}条,耗时: {(end_time - start_time)}秒')
    #     except Exception as e:
    #         print(f"crawl_ts_code_stable_data error info : {e}")
    #     finally:
    #         return

    def crawl_stable_data(self):
        """Build the `stable` collection of per-security fixed facts.

        For every tdx stock code and index code: read the security name from
        the local no-fq export file, take the earliest date present in the
        `daily` collection as the listing date, and upsert one document per
        (code, index) pair.
        """
        dc = DailyCrawler()
        all_codes = dc.tdx_get_all_codes()
        all_indexes = dc.tdx_get_all_indexes()

        start_time = time.time()
        self.stable_db.create_index([('code', 1), ('index', 1)])

        update_requests = []
        # Stocks and indexes share the same pipeline; only the source
        # directory and the `index` flag differ.
        for code in all_codes:
            request = self._build_stable_request(code, is_index=False)
            if request is not None:
                update_requests.append(request)
        for code in all_indexes:
            request = self._build_stable_request(code, is_index=True)
            if request is not None:
                update_requests.append(request)

        if len(update_requests) > 0:
            update_result = self.stable_db.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('抓取股票固定信息， 插入：%4d条，更新：%4d条,耗时: %.3f 秒' %
                  (update_result.upserted_count, update_result.modified_count, (end_time - start_time)),
                  flush=True)

        return

    def _build_stable_request(self, code, is_index):
        """Build the UpdateOne upsert for one security, or None on failure.

        :param code: 6-digit security code
        :param is_index: True for indexes, False for stocks
        """
        try:
            nofq_dir = index_nofq_path if is_index else code_nofq_path
            file_path = os.path.join(nofq_dir, code + ".txt")
            # NOTE(review): encoding unspecified — other tdx exports in this
            # file are read as GBK; confirm whether these files need it too.
            with open(file_path, 'r') as f:
                # First line looks like "<code> <name> ..."; take the name.
                name = f.readline().split(" ")[1]

            # All trading dates ascending: the first one is the listing date.
            trade_cursor = DB_CONN['daily'].find(
                {'code': code, 'index': is_index},
                sort=[('date', ASCENDING)],
                projection={'date': True, '_id': False},
                batch_size=5000)

            doc = {
                'code': code,
                'index': is_index,
                'name': name,
                'timeToMarket': trade_cursor[0]['date'],
            }
            return UpdateOne(
                {'code': code, 'index': is_index},
                {'$set': doc}, upsert=True)
        except Exception as e:
            # Report the reason instead of silently swallowing it (was a bare except).
            label = '指数代码' if is_index else '股票代码'
            print('查找daily数据集发生异常，%s：%s，%s' % (label, code, e), flush=True)
            return None
    @staticmethod
    def file2code(index_file):
        name2code_dict = {
            'hs300':'000300',
            'zz500':'000905',
            'sz50':'000016',
            'kc50':'000688',
        }
        file_name = index_file.stem
        update_date = f"{file_name[-8:-4]}-{file_name[-4:-2]}-{file_name[-2:]}"
        index_name = file_name[:-8]
        index_code = name2code_dict[index_name]

        return update_date,index_code

    def crawl_index_cons(self):
        """Import index constituent lists from local tdx export files.

        Reads every ``*.txt`` under ``index_constituent`` (one file per index
        per date, GBK-encoded) and upserts one document per
        (index_code, update_date) carrying the full constituent list.
        """
        index_path = Path(f"{base_data_path}/index_constituent")
        index_file_list = index_path.glob('*.txt')

        update_requests = list()
        collection = self.index_cons_db
        # Index on the upsert filter keys to speed up the bulk writes.
        collection.create_index([('index_code', 1), ('update_date', 1)])
        change_col = {
            "代码": 'code',
            "名称": 'name'
        }

        for index_file in index_file_list:
            print(index_file)
            data_df = pd.read_table(index_file, encoding="GBK", dtype=object)
            # tdx exports end with a trailer row; drop it in place.
            data_df.drop(data_df.tail(1).index, inplace=True)

            data_df.rename(columns=change_col, inplace=True)
            data_df['code'] = data_df['code'].astype(str)

            update_date, index_code = self.file2code(index_file)

            doc = {
                'index_code': index_code,
                'update_date': update_date,
                # One dict per constituent row (code, name, ...) — replaces
                # the former iterrows/append loop with the vectorized form.
                'code_list': data_df.to_dict(orient='records'),
            }

            update_requests.append(
                UpdateOne(
                    {'index_code': index_code, 'update_date': update_date},
                    {'$set': doc},
                    upsert=True)
            )

        # Bulk write for efficiency.
        if len(update_requests) > 0:
            start_time = time.time()
            update_result = collection.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('保存指数成分数据到数据集：%s，插入：%4d条, 更新：%4d条,耗时：%.3f 秒' %
                  (collection.name, update_result.upserted_count, update_result.modified_count,
                   (end_time - start_time)),
                  flush=True)

        return

    def supply_stable_data(self, data_df):
        """Merge the per-stock sub-industry field into the `stable` collection.

        :param data_df: daily export frame with at least `code` and `sub_industry`
        """
        collection = self.stable_db

        # One upsert per stock row, keyed on (code, index=False).
        update_requests = [
            UpdateOne(
                {'code': row['code'], 'index': False},
                {'$set': {'sub_industry': row['sub_industry']}},
                upsert=True)
            for _, row in data_df.iterrows()
        ]

        # Bulk write for efficiency.
        if len(update_requests) > 0:
            start_time = time.time()
            update_result = collection.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('保存个股对应的细分行业等数据到数据集：%s，插入：%4d条, 更新：%4d条,耗时：%.3f 秒' %
                  (collection.name, update_result.upserted_count, update_result.modified_count,
                   (end_time - start_time)),
                  flush=True)

        return

    def crawl_block_info(self, data_df, origin):
        """Record sub-industry block membership into the `block` collection.

        :param data_df: daily export frame with `code` and `sub_industry` columns
        :param origin: data-source tag stored on each block doc, e.g. "tdx"
        """
        collection = self.block_db
        update_requests = list()
        # Index on the upsert filter keys to speed up the bulk writes.
        collection.create_index([('name', 1), ('type', 1), ('origin', 1)])

        # Group rows by sub-industry; each group becomes one block document.
        # (Callers dedupe the frame first — tdx exports sometimes repeat rows.)
        for block_name, members in data_df.groupby('sub_industry'):
            doc = {
                'name': block_name,
                'type': "sub_industry",
                'origin': origin,
                'cur_count': members.index.size,
                'code_list': members['code'].tolist(),
            }

            update_requests.append(
                UpdateOne(
                    {'name': doc['name'], 'type': doc['type'], 'origin': doc['origin']},
                    {'$set': doc},
                    upsert=True)
            )

        # Bulk write for efficiency.
        if len(update_requests) > 0:
            start_time = time.time()
            update_result = collection.bulk_write(update_requests, ordered=False)
            end_time = time.time()
            print('保存板块成分数据到数据集：%s，插入：%4d条, 更新：%4d条,耗时：%.3f 秒' %
                  (collection.name, update_result.upserted_count, update_result.modified_count,
                   (end_time - start_time)),
                  flush=True)

        return


    def crawl_tdx_daily_data(self):
        """Ingest the newest tdx daily export (``*.txt``) under ``dailydata``.

        Loads the latest export, normalizes the columns, then feeds the frame
        to supply_stable_data() and crawl_block_info().
        """
        daily_data_path = Path(f"{base_data_path}/dailydata")
        # Path.glob yields files in arbitrary order, so [-1] of the raw glob
        # was not guaranteed to be the newest file; sort to fix that.
        # (Assumes file names sort chronologically — TODO confirm naming.)
        data_file_list = sorted(daily_data_path.glob('*.txt'))
        if not data_file_list:
            # Nothing to process; previously this raised IndexError.
            print('dailydata 目录下没有找到 *.txt 文件', flush=True)
            return

        # Read the latest daily export (GBK-encoded tdx dump).
        data_df = pd.read_table(data_file_list[-1], encoding="GBK", dtype=object)
        data_df.drop(data_df.tail(1).index, inplace=True)  # drop the trailer row in place

        change_col = {
            "代码": 'code',
            "名称": 'name',
            "细分行业": 'sub_industry',
            "地区": "area"
        }

        data_df.rename(columns=change_col, inplace=True)
        data_df['code'] = data_df['code'].astype(str)

        # tdx exports sometimes repeat rows; keep the last occurrence per code.
        data_df.drop_duplicates(['code'], keep='last', inplace=True)

        # Persist the slow-changing fields such as sub-industry.
        self.supply_stable_data(data_df)

        # Record per-industry block membership.
        self.crawl_block_info(data_df, "tdx")

        return

    def crawl_choice_daily_data(self):
        """Ingest the newest choice daily export (``*.xls``) under ``dailydata``.

        Loads the latest export, normalizes the columns, then feeds the frame
        to supply_stable_data() and crawl_block_info().
        """
        daily_data_path = Path(f"{base_data_path}/dailydata")
        # Path.glob yields files in arbitrary order, so [-1] of the raw glob
        # was not guaranteed to be the newest file; sort to fix that.
        # (Assumes file names sort chronologically — TODO confirm naming.)
        data_file_list = sorted(daily_data_path.glob('*.xls'))
        if not data_file_list:
            # Nothing to process; previously this raised IndexError.
            print('dailydata 目录下没有找到 *.xls 文件', flush=True)
            return

        # Read the latest daily export.
        data_df = pd.read_excel(data_file_list[-1], dtype=object)
        data_df.drop(data_df.tail(1).index, inplace=True)  # drop the trailer row in place

        change_col = {
            "代码": 'code',
            "名称": 'name',
            "所属行业": 'sub_industry',
        }

        data_df.rename(columns=change_col, inplace=True)
        data_df['code'] = data_df['code'].astype(str)

        # Exports sometimes repeat rows; keep the last occurrence per code.
        data_df.drop_duplicates(['code'], keep='last', inplace=True)

        # Persist the slow-changing fields such as sub-industry.
        self.supply_stable_data(data_df)

        # Record per-industry block membership.
        self.crawl_block_info(data_df, "choice")

        return

if __name__ == '__main__':
    # Widen pandas display limits so debug prints stay readable.
    for option in ('display.width', 'display.max_columns', 'display.max_colwidth'):
        pd.set_option(option, 130)

    bc = BasicCrawler()
    # Uncomment the crawl step(s) to run:
    #bc.crawl_ts_daily_basic(begin_date='2019-11-22',end_date='2019-11-22')
    #bc.crawl_stable_data()
    bc.crawl_index_cons()
    #bc.crawl_tdx_daily_data()
    #bc.crawl_choice_daily_data()