from DataGeter import parse_url, inputfomating
from OtherTools import filetool
from DataTools import datatool
import pandas as pd
import numpy as np
import asyncio
import random
import datetime
import time
import gc
import os
from rich import print


def Parse163(area_code, start, end, temPath=r"D:/StockDatas/"):
    """Request daily historical bar data for one stock from 163 (NetEase).

    Parameters
    ----------
    area_code : exchange flag + 6-digit code, e.g. "0601398" (0 Shanghai, 1 Shenzhen).
    start, end : date strings like "20201117".
    temPath : directory where the raw CSV response is written as "temp.csv".

    Returns
    -------
    pd.DataFrame with English column names, or an empty DataFrame when the
    request failed or returned no rows.
    """
    url = "http://quotes.money.163.com/service/chddata.html?"
    param1 = "code={}&start={}&end={}".format(area_code, start, end)
    param2 = "&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP"
    url = url + param1 + param2

    header = {
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    }

    con = parse_url.parse_url(url, header)
    if con is None:
        # Fix: previously a failed request still read the stale temp.csv from
        # the previous stock, attributing old data to the wrong code.
        return pd.DataFrame()

    # Fix: os.path.join was called with a single pre-concatenated string.
    temp_file = os.path.join(temPath, "temp.csv")
    with open(temp_file, "w", encoding="gbk") as f:
        f.write(con)

    df = pd.read_csv(temp_file, encoding="gbk")
    if df.shape[0] == 0:
        return pd.DataFrame()

    rename_dic = {
        "日期": "date",
        "股票代码": "code",
        "名称": "ChName",
        "收盘价": "close",
        "最高价": "high",
        "最低价": "low",
        "开盘价": "open",
        "前收盘": "BeforeClose",
        "涨跌额": "Change",
        "涨跌幅": "Rate",
        "换手率": "ExRate",
        "成交量": "Volume",
        "成交金额": "Amount",
        "总市值": "TotalVal",
        "流通市值": "CurrencyVal",
    }
    df.rename(columns=rename_dic, inplace=True)
    return df


def Reform163(df):
    """Clean and enrich a raw 163 daily-bar DataFrame.

    Drops suspended trading days (Rate == "None"), converts lots to shares,
    and derives share counts, turnover rate and average price.

    Returns a new DataFrame; the input is not modified.
    """
    # Fix: operate on an explicit copy — assigning into a boolean-filtered
    # view raised SettingWithCopyWarning and relied on pandas internals.
    df = df.loc[df.loc[:, "Rate"] != "None"].copy()
    df.loc[:, "CurrencyShare"] = df.loc[:, "CurrencyVal"] / df.loc[:, "close"]
    df.loc[:, "TotalShare"] = df.loc[:, "TotalVal"] / df.loc[:, "close"]
    # 163 reports volume in lots (100 shares each).
    df.loc[:, "Volume"] = df.loc[:, "Volume"] * 100
    numeric_cols = [
        "Volume",
        "TotalVal",
        "CurrencyVal",
        "CurrencyShare",
        "TotalShare",
        "Change",
    ]
    for col in numeric_cols:
        # astype(float) is the vectorized equivalent of apply(lambda x: float(x))
        df.loc[:, col] = df.loc[:, col].astype(float)
    df.loc[:, "Rate"] = df.loc[:, "Change"] / df.loc[:, "BeforeClose"]
    df.loc[:, "ExRate"] = df.loc[:, "Volume"] / df.loc[:, "CurrencyShare"]
    df["Avg"] = df.Amount / df.Volume

    return df


def retryLoop(area_code, start, end, temPath):
    """Fetch daily bars with up to 5 attempts.

    Returns (DataFrame, seconds_slept_on_retries). Terminates the process
    after the 5th consecutive empty response.

    Fixes: the original `elif kill == 4` branch was unreachable (the
    `shape[0] == 0` branch always matched first), so after 5 failures the
    function fell off the end and returned None, crashing the caller's
    `df, retrysleep = ...` unpack.
    """
    retrysleep = 0
    for attempt in range(1, 6):
        df = Parse163(area_code, start=start, end=end, temPath=temPath)
        if df.shape[0] > 0:
            return df, retrysleep
        print("{} retrying {}".format(area_code, attempt).center(80, "-"))
        if attempt == 5:
            print("{}kill process".format(area_code).center(80, "*"))
            exit()
        t = parse_url.sleeper(50, 300, 100)
        retrysleep += t


def getDateList(code, Fpath, colName="time"):
    """Read a per-stock history CSV and return its trading dates.

    Parameters
    ----------
    code : stock code — unused, kept for caller compatibility.
    Fpath : path of the CSV file (assumed to exist).
    colName : name of the timestamp column.

    Returns
    -------
    (sorted list of unique dates as ints like 20201117, full DataFrame),
    or (None, None) after deleting the file when it has no rows, so the
    caller re-fetches from scratch.
    """
    old = pd.read_csv(Fpath, index_col=None)
    if old.shape[0] == 0:
        # An empty history file is useless — remove it to force a full refetch.
        os.remove(Fpath)
        return None, None
    # "2020-11-17 00:00:00" -> 20201117
    dates = old[colName].apply(lambda x: "".join(str(x).split(" ")[0].split("-")))
    date_list = sorted({int(d) for d in dates})
    return date_list, old


def reformDF(df):
    """Sort rows by date ascending and drop rows whose OHLC are all zero.

    Returns a new DataFrame; the input is not modified.
    """
    df = df.copy()  # avoid mutating the caller's frame via the "dates" helper column
    df["dates"] = pd.to_datetime(df["date"])
    # Fix: ascending was the *string* "True", which only worked because any
    # non-empty string is truthy; descending could never have been expressed.
    df = df.sort_values(by="dates", ascending=True)
    df = df.drop("dates", axis=1)
    # Rows where all four prices are zero are placeholder/no-trade rows:
    # blank them out, then drop them.
    all_zero = (
        (df["open"] == 0)
        & (df["close"] == 0)
        & (df["high"] == 0)
        & (df["low"] == 0)
    )
    df[all_zero] = np.nan
    df = df.dropna(subset=["open", "close", "low", "high"])
    return df


def RequestData(
    code,
    stock_details,
    counter=1,
    endday="20201122",
    totalLens=1,
    timer=0,
    retrysleep=0,
    ParthDic=None,
    printLens=60,
):
    """Download/refresh the daily-bar CSV for one stock code.

    Parameters
    ----------
    code : 6-digit stock code, e.g. "000001".
    stock_details : per-code detail list; [0] is the listing date, [-1] the
        exchange flag prepended to the code for the 163 request.
    counter, totalLens : progress reporting (skipped when either is 0).
    endday : last day to fetch, "YYYYMMDD".
    timer, retrysleep : accumulated sleep seconds, threaded through calls.
    ParthDic : path-fragment dict; defaults to the project layout.
    printLens : width for centered console output.

    Returns (DataFrame, timer, retrysleep).
    """
    # Fix: mutable default argument replaced by None-sentinel.
    if ParthDic is None:
        ParthDic = {
            "Main": "D:/StockDatas/",
            "basic": "basic/",
            "daily": "163_Daily_Bar/",
            "report": "Reports/",
            "temp": "temp/",
            "ticker": "History_ticker/",
            "mins": "History_mins/",
        }

    _df = pd.DataFrame()
    tempath = ParthDic["Main"] + ParthDic["temp"]
    FilePath = ParthDic["Main"] + ParthDic["daily"] + code + ".csv"

    # Format start (listing date) and end dates as ints like 20201117.
    starttime = int(inputfomating.dateFormating(stock_details[0], mode="S"))
    endday = int(inputfomating.dateFormating(endday, mode="S"))

    now = datetime.datetime.now()
    todaydate = int(now.strftime("%Y%m%d"))
    todayHour = now.hour

    lasttime = None
    if filetool.FileCheck(FilePath):  # resume from the existing file if any
        lasttime, _df = getDateList(code, FilePath, colName="date")
        if lasttime:
            starttime = lasttime[-1]
            _df.drop_duplicates("date", inplace=True)
            _df.to_csv(FilePath, index=False)
        elif _df is None:
            # getDateList removed an empty file — start over with a clean frame.
            _df = pd.DataFrame()

    if counter != 0 and totalLens != 0:
        print(
            "{} / {} | {} >> {}".format(
                counter,
                totalLens,
                starttime,
                endday,
            ).center(printLens, " ")
        )

    # Fetch only when there is a gap to fill, and today's data only after 16:00.
    if (
        starttime < int(endday)
        or (starttime == int(endday) and filetool.FileCheck(FilePath) == False)
    ) and (
        ((int(endday) == todaydate and todayHour >= 16) or (int(endday) != todaydate))
    ):

        area_code = str(stock_details[-1]) + str(code)

        # Fix: the retry sleep returned by retryLoop was assigned over the
        # accumulator and then doubled via `retrysleep += retrysleep`.
        df, rsleep = retryLoop(area_code, starttime, endday, temPath=tempath)
        retrysleep += rsleep

        df = Reform163(df)
        if lasttime is not None:
            df = pd.concat([_df, df])
        df = reformDF(df)
        df.drop_duplicates("date", inplace=True)
        df.to_csv(FilePath, index=False)
        print(FilePath.center(printLens, " "), "\n")
        t = parse_url.sleeper(10, 100, 100)
        timer += t
        if counter != 0 and totalLens != 0:
            # minutes elapsed | rough ETA in hours extrapolated from progress
            print(
                " {} m | {} H".format(
                    round(float(timer / 60), 2),
                    round(
                        float((timer + retrysleep) / (counter / totalLens) / (60 * 60)),
                        2,
                    ),
                ).center(printLens, " ")
            )
        return df, timer, retrysleep
    else:
        return _df, timer, retrysleep


def Mainloop(
    skiplist=["689009"],
    MainDir=r"D:/StockDatas/",
    subdir=r"163_Daily_Bar/",
    basicdir=r"basic/",
    temPath="temp/",
):
    """Sequentially download/refresh daily bars for every known stock code.

    Fixes over the original:
    - RequestData was called with keyword args (MainDir/subdir/tempath) that
      do not exist in its signature, raising TypeError; it takes ParthDic.
    - `codeL.append(codeL)` appended the list to itself, and the buffering
      logic meant the first 5 codes were never processed at all.
    - MainDir/basicdir parameters were ignored (paths were hard-coded).
    """
    printLens = 60

    dic = datatool.getCodeAndDetail(MainPath=MainDir, subPath=basicdir)

    endday = datatool.getTradedayinfo(
        MainPath=MainDir, subPath=basicdir, mode="LastOpenDay"
    )
    endday = "".join(endday.split("-"))  # "2020-11-22" -> "20201122"

    # Make sure the output and temp directories exist.
    filetool.DirPathChick(MainDir + subdir)
    filetool.DirPathChick(MainDir + temPath)

    ParthDic = {
        "Main": MainDir,
        "basic": basicdir,
        "daily": subdir,
        "temp": temPath,
    }

    counter = 0
    timer = 0
    retrysleep = 0
    totalLens = len(dic)
    for code in dic:  # main loop over every known code
        print("\n" * 3)
        print(code.center(printLens, " "))
        if code in skiplist:  # skip codes known to be problematic
            print(code, "skip")
            continue
        counter += 1
        df, timer, retrysleep = RequestData(
            code,
            dic[code],
            counter=counter,
            endday=endday,
            totalLens=totalLens,
            timer=timer,
            retrysleep=retrysleep,
            ParthDic=ParthDic,
            printLens=printLens,
        )
        gc.collect()


async def RequestData_M(
    code,
    stock_details,
    counter=1,
    endday="20201122",
    totalLens=1,
    timer=0,
    retrysleep=0,
    ParthDic=None,
    printLens=60,
):
    """Async variant of RequestData: download/refresh one stock's daily bars.

    Returns (DataFrame, timer, retrysleep). Retries a failing request up to
    5 times (sleeping cooperatively), then terminates the process.

    NOTE(review): every concurrent coroutine writes the same
    ``ParthDic["temp"] + "temp.csv"`` via Parse163, so coroutines in one
    batch can clobber each other's download — confirm/serialize if data
    looks mixed up.
    """
    # Fix: mutable default argument replaced by None-sentinel.
    if ParthDic is None:
        ParthDic = {
            "Main": "D:/StockDatas/",
            "basic": "basic/",
            "daily": "163_Daily_Bar/",
            "report": "Reports/",
            "temp": "temp/",
            "ticker": "History_ticker/",
            "mins": "History_mins/",
        }

    await asyncio.sleep(0.05)  # yield so batch members interleave
    print(code.center(printLens, " "))
    _df = pd.DataFrame()
    tempath = ParthDic["Main"] + ParthDic["temp"]
    FilePath = ParthDic["Main"] + ParthDic["daily"] + code + ".csv"

    # Format start (listing date) and end dates as ints like 20201117.
    starttime = int(inputfomating.dateFormating(stock_details[0], mode="S"))
    endday = int(inputfomating.dateFormating(endday, mode="S"))

    now = datetime.datetime.now()
    todaydate = int(now.strftime("%Y%m%d"))
    todayHour = now.hour

    lasttime = None
    if filetool.FileCheck(FilePath):  # resume from the existing file if any
        lasttime, _df = getDateList(code, FilePath, colName="date")
        # Fix: the sync version guards `if lasttime:` but this one indexed
        # lasttime[-1] unconditionally, crashing when getDateList returned
        # (None, None) for an empty history file.
        if lasttime:
            starttime = lasttime[-1]
            _df.drop_duplicates("date", inplace=True)
            _df.to_csv(FilePath, index=False)
        elif _df is None:
            _df = pd.DataFrame()

    if counter != 0 and totalLens != 0:
        print(
            "{} / {} | {} >> {}".format(
                counter,
                totalLens,
                starttime,
                endday,
            ).center(printLens, " ")
        )

    # Fetch only when there is a gap to fill, and today's data only after 16:00.
    if (
        starttime < int(endday)
        or (starttime == int(endday) and filetool.FileCheck(FilePath) == False)
    ) and (
        ((int(endday) == todaydate and todayHour >= 16) or (int(endday) != todaydate))
    ):

        area_code = str(stock_details[-1]) + str(code)
        rsleep = 0
        df = pd.DataFrame()
        for attempt in range(1, 6):
            df = Parse163(area_code, start=starttime, end=endday, temPath=tempath)
            if df.shape[0] > 0:
                break
            print("{} retrying {}".format(area_code, attempt).center(80, "-"))
            # Fix: the `elif kill == 4` kill-switch was unreachable.
            if attempt == 5:
                print("{}kill process".format(area_code).center(80, "*"))
                exit()
            t = parse_url.sleeper(50, 300, 100)
            await asyncio.sleep(t)
            rsleep += t
        # Fix: was `retrysleep += retrysleep`, doubling instead of accumulating.
        retrysleep += rsleep

        df = Reform163(df)
        if lasttime is not None:
            df = pd.concat([_df, df])
        df = reformDF(df)
        df.drop_duplicates("date", inplace=True)
        df.to_csv(FilePath, index=False)
        print(FilePath.center(printLens, " "), "\n")
        t = random.randint(500, 1000) / 100  # 5-10 s politeness delay
        timer += t
        print("< {} sec >".format(t).center(printLens, " "), "\n")
        if counter != 0 and totalLens != 0:  # guard like the sync version (avoids /0)
            # minutes elapsed | rough ETA in hours extrapolated from progress
            print(
                " {} m | {} H".format(
                    round(float(timer / 60), 2),
                    round(
                        float((timer + retrysleep) / (counter / totalLens) / (60 * 60)),
                        2,
                    ),
                ).center(printLens, " ")
            )
        await asyncio.sleep(t)
        return df, timer, retrysleep
    else:
        return _df, timer, retrysleep


def Mainloop_async(
    numL=4,
    skiplist=["689009"],
    ParthDic={
        "Main": "D:/StockDatas/",
        "basic": "basic/",
        "daily": "163_Daily_Bar/",
        "report": "Reports/",
        "temp": "temp/",
        "ticker": "History_ticker/",
        "mins": "History_mins/",
    },
):
    """Async main loop: download daily bars in batches of *numL* concurrent requests.

    Waits until after 16:00 (returns immediately before 06:00), then walks
    every known code, skipping *skiplist*, and runs RequestData_M in batches.

    Fixes over the original:
    - The code that *triggered* a full batch was silently dropped (never
      added to the next batch), and the final partial batch was never run.
    - asyncio.wait() no longer accepts bare coroutines (TypeError on 3.11+);
      asyncio.gather is used instead.
    - ParthDic was ignored when locating the basic/ data (paths hard-coded).
    """
    printLens = 60

    dic = datatool.getCodeAndDetail(
        MainPath=ParthDic["Main"], subPath=ParthDic["basic"]
    )

    endday = datatool.getTradedayinfo(
        MainPath=ParthDic["Main"], subPath=ParthDic["basic"], mode="LastOpenDay"
    )
    endday = "".join(endday.split("-"))  # "2020-11-22" -> "20201122"

    # Make sure the output and temp directories exist.
    filetool.DirPathChick(ParthDic["Main"] + ParthDic["daily"])
    filetool.DirPathChick(ParthDic["Main"] + ParthDic["temp"])

    counter = 0
    timer = 0
    retrysleep = 0
    totalLens = len(dic)
    loop = asyncio.get_event_loop()

    # Wait until the market data is final (after 16:00); bail out before 06:00.
    while True:
        nows = int(datetime.datetime.now().strftime("%H%M"))
        if nows < 600:
            return
        T = (1600 - nows) / 100 * 60
        if T < 0:
            print("start!")
            break
        T = T * 6 + random.randint(0, 15)
        print("{}后开始接收数据".format(round(T)).center(printLens, " "))
        time.sleep(T)

    def _run_batch(batch, batch_counter):
        """Run one batch of RequestData_M coroutines to completion."""
        tasks = [
            RequestData_M(
                c,
                batch[c],
                counter=batch_counter,
                endday=endday,
                totalLens=totalLens,
                timer=timer,
                retrysleep=retrysleep,
                ParthDic=ParthDic,
                printLens=printLens,
            )
            for c in batch
        ]
        loop.run_until_complete(asyncio.gather(*tasks))

    codeL = {}
    for code in dic:  # main loop over every known code
        if code in skiplist:  # skip codes known to be problematic
            print(code, "skip")
            continue
        codeL[code] = dic[code]
        if len(codeL) >= numL:
            print("".center(60, "-"))
            counter += len(codeL)
            _run_batch(codeL, counter)
            codeL = {}
            gc.collect()
    if codeL:  # flush the final partial batch
        counter += len(codeL)
        _run_batch(codeL, counter)
    # loop.close()


# %%
