import os
import re
import sys

import pandas as pd
import tushare as ts
from loguru import logger

from mylib.mycsv import sort_csv, read_csv
from models.stock_model import StockNumber, DayInfo


def get_all_stock(fw, N, stock, end_date):
    """Iterate all listed stocks from all.csv and analyse each eligible one.

    Args:
        fw: open file handle that analysis results are appended to.
        N: required length of the consecutive-down streak.
        stock: optional ts_code filter; when truthy only that stock is analysed.
        end_date: trading dates after this date are ignored downstream.
    """
    df = pd.read_csv('../../all.csv')
    for row in df.index:
        record = df.loc[row]
        # Honour the optional single-stock filter.
        if stock and record['ts_code'] != stock:
            continue
        sn = StockNumber(record)
        # Skip delisted ('退') and special-treatment ('ST') stocks.
        if '退' in sn.name or 'ST' in sn.name:
            continue
        # Keep only SH main board (60), ChiNext (30) and SZ main board (00) codes.
        if not str(sn.ts_code).startswith(('60', '30', '00')):
            continue
        analysis_stock(fw, N, sn, end_date)


def analysis_stock(fw, N, sn, end_date):
    """Scan one stock's daily bars and report an N-day losing streak.

    Downloads daily bars from tushare on first use (cached as
    ``stocks/<ts_code>.csv``), then walks the rows from newest to oldest
    looking for N consecutive days with non-positive pct_chg; when found,
    writes one CSV report line to *fw* and stops.

    Args:
        fw: open file handle the report line is appended to.
        N: required length of the consecutive-down streak.
        sn: StockNumber carrying ts_code / name / industry attributes.
        end_date: trading dates after this date are skipped.
    """
    csv_path = f'stocks/{sn.ts_code}.csv'
    if not os.path.exists(csv_path):
        # SECURITY NOTE(review): a hard-coded API token should live in an
        # environment variable / config file; kept as fallback for
        # backward compatibility.
        token = os.environ.get(
            'TUSHARE_TOKEN',
            'bbc6f076aa3d2b063cb26376ad12ffd94b53864b42d2344b3aa2039f')
        pro = ts.pro_api(token=token)
        df2 = pro.daily(ts_code=sn.ts_code)
        # Bug fix: previously '../../stocks' was created while the file was
        # written to 'stocks/', so the save could fail when that directory
        # did not exist. Create the actual parent directory of csv_path.
        os.makedirs(os.path.dirname(csv_path), exist_ok=True)
        df2.to_csv(csv_path)
        print(f'save {sn.ts_code} {sn.name} 成功')
    df2 = pd.read_csv(csv_path)
    down_arr = []        # current run of consecutive non-up days, newest first
    all_days_data = []   # all days examined so far (<= end_date)
    di = None            # most recently constructed DayInfo
    for row2 in df2.index:
        if len(down_arr) >= N:
            # Build a Xueqiu link like SH600438 from '600438.SH'.
            code, exchange = sn.ts_code.split('.')
            link_code = f'{exchange}{code}'
            hyperlink = f'= HYPERLINK("https://xueqiu.com/S/{link_code}"),{di.name}'
            msg = f'{hyperlink},{di.industry},{down_arr[0].close},{down_arr[0].trade_date},{len(all_days_data)},天前{N}连跌\n'
            fw.write(msg)
            print(msg)
            break
        di = DayInfo(sn, df2.loc[row2])
        di.name = sn.name
        di.ts_code = sn.ts_code
        di.industry = sn.industry
        # Ignore bars newer than the requested end date.
        if str(di.trade_date) > str(end_date):
            continue
        # Stop scanning cheap stocks (close below 10 yuan).
        if di.close < 10:
            break
        if di.pct_chg <= 0:
            down_arr.append(di)
        else:
            down_arr = []  # an up day breaks the streak
        all_days_data.append(di)


def printNumber(file_path, N, end_date):
    """Append the report header to *file_path*, then analyse every configured stock."""
    header = "超链接,名字,行业,价格,不满足日期,ndays,描述\n"
    with open(file_path, 'a+') as fw:
        print(header)
        fw.write(header)
        # `stocks` is the module-level list configured in __main__.
        for ts_code in stocks:
            get_all_stock(fw, N, ts_code, end_date)


if __name__ == '__main__':
    bkd = 40    # how many recent trading days to compare
    N = 2       # streak length used in the per-day result file names
    kdays = 19  # passed through to read_csv; overridable via argv[1]
    if len(sys.argv) > 1:
        kdays = int(sys.argv[1])

    stocks = [
        '600438.SH',
    ]
    # e.g. /path/foo_cal.py -> log dir /path/foo
    log_dir = os.path.abspath(__file__).replace('.py', '').replace('_cal', '')
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    # Start each run with a fresh log file.
    fpath = f'{log_dir}/{kdays}_n_down_bkd.txt'
    if os.path.exists(fpath):
        os.remove(fpath)

    logger.add(fpath, format='{message}')

    # Use 000001.SZ's cached bars to enumerate trading dates (newest first).
    df = pd.read_csv('../../stocks/000001.SZ.csv')
    all_trade_date_arr = [df.loc[row]['trade_date'] for row in df.index]

    file_path_arr = []
    for idx, trade_date in enumerate(all_trade_date_arr):
        if idx > bkd:
            break
        file_name = '{}_连跌_{}.csv'.format(trade_date, N)
        file_path_arr.append((trade_date, f'{log_dir}/{file_name}'))

    logger.info('开始计算')
    data_arr = []
    for _, file_path in file_path_arr:
        sort_csv(file_path)
        data_arr.append(read_csv(file_path, kdays))

    # Raw-string regex (the old non-raw '\(' is an invalid escape sequence),
    # compiled once instead of rebuilt for every entry.
    hyperlink_re = re.compile(r'(.*?) = HYPERLINK\("(.*?)"\)')

    def _log_entries(entries):
        # Log "name" and "link" (one blank line after each) for every entry.
        for entry in entries:
            name, link = hyperlink_re.findall(entry)[0]
            logger.info(name)
            logger.info('')
            logger.info(link)
            logger.info('')

    # Diff consecutive days' result sets: who entered / left the list.
    for idx in range(len(data_arr) - 1):
        rdata = data_arr[idx] - data_arr[idx + 1]
        logger.warning(f'{file_path_arr[idx][0]}, 上榜{len(rdata)}:')
        logger.info('')
        _log_entries(rdata)

        logger.info('')
        rdata = data_arr[idx + 1] - data_arr[idx]
        logger.warning(f'{file_path_arr[idx][0]}, 落榜{len(rdata)}:')
        logger.info('')
        _log_entries(rdata)
        logger.info('=' * 25)
    logger.info('分析运行完成')
