import csv
import os
from datetime import datetime
import time
import okx.MarketData as MarketData
from utils.config import *
from utils.utils import *
# "0" selects the live (production) environment, "1" the demo environment (OKX API convention)
flag = "0"  # live: 0, demo: 1
# Shared market-data client for the whole module; `server_url` comes from utils.config (wildcard import)
marketDataAPI =  MarketData.MarketAPI(flag=flag,domain=server_url)

# 从api拉取数据
# Pull candlestick data from the API
def getDataApi(instId='BTC-USDT-SWAP',bar='15m',after=0,max_retries=5):
    """
    Fetch historical candlestick (K-line) data for a trading product, with retries.

    :param instId: instrument id (trading pair), e.g. 'BTC-USDT-SWAP'
    :param bar: candlestick period, e.g. '15m'
    :param after: pagination cursor timestamp; 0 means "start from the most recent page"
    :param max_retries: maximum number of attempts on exceptions (exponential backoff)
    :return: the API's 'data' list on success, or None on failure / error response
    """
    for attempt in range(max_retries):
        try:
            # Only pass `after` when a pagination cursor has been set;
            # the API treats a missing `after` as "latest data".
            if after == 0:
                result = marketDataAPI.get_history_candlesticks(instId=instId, bar=bar)
            else:
                result = marketDataAPI.get_history_candlesticks(instId=instId, after=after, bar=bar)

            # A missing response or a non-zero business code is treated as
            # "no data" and is NOT retried (only exceptions trigger retries).
            if result is None or result['code'] != '0':
                return None
            return result['data']
        except Exception as e:
            if attempt < max_retries - 1:
                # Exponential backoff before retrying
                time.sleep(2 ** attempt)
            else:
                # Include the exception so failures are diagnosable from the log
                print(f"拉取api数据失败 instId={instId}, bar={bar}, after={after}, error={e}")
                return None
    return None

# 读取文件 并对文件进行 反序保存，适用于 首次全量下载后的操作(同时会对数据进行清洗 保证只有一条)
# Read the file and rewrite it in ascending timestamp order; used after the
# initial full download (also cleans the data so each timestamp appears once).
def sort_csv_by_timestamp(instId,bar):
    """
    Read the CSV file for (instId, bar), deduplicate rows by timestamp,
    sort them ascending, and rewrite the file keeping only the required columns.

    Keeps the FIRST record encountered (in file order) for each timestamp.
    """
    required_columns = ["timestamp", "open", "high", "low", "close", "volume"]
    filename = getFilename(instId,bar)

    with open(filename, mode='r', newline='', encoding='utf-8') as file:
        reader = csv.DictReader(file)
        # Deduplicate while reading: first occurrence of each timestamp wins,
        # restricted to the required columns. Deduplicating before sorting
        # avoids sorting rows that will be discarded anyway.
        unique_rows = {}
        for row in reader:
            timestamp = int(row['timestamp'])
            if timestamp not in unique_rows:
                unique_rows[timestamp] = {col: row[col] for col in required_columns}

    # Emit rows in ascending timestamp order.
    sorted_rows = [unique_rows[ts] for ts in sorted(unique_rows)]

    with open(filename, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=required_columns)
        writer.writeheader()
        writer.writerows(sorted_rows)


def get_last_timestamp(filename):
    """
    Return the largest timestamp stored in the CSV file.

    :param filename: path to the CSV file (timestamp assumed in the first column)
    :return: the maximum timestamp as int, or 0 if the file is missing or
             contains no data rows
    """
    if not os.path.exists(filename):
        return 0  # no file yet -> nothing downloaded

    with open(filename, mode='r', newline='', encoding='utf-8') as file:
        reader = csv.reader(file)
        # Skip the header row; the None default prevents StopIteration
        # from escaping when the file exists but is completely empty.
        next(reader, None)
        last_row_time = 0
        for row in reader:
            # Timestamp is in the first column; rows may be unsorted, so scan all.
            item = int(row[0])
            if item > last_row_time:
                last_row_time = item
    return last_row_time

# 首次全量下载数据  适合文件不存在的情况
# Initial full download; intended for when the CSV file does not exist yet.
def save_kline_data_to_csv(instId,bar='15m'):
    """
    Download the complete candlestick history for an instrument and append it
    to its CSV file, paging backwards in time until the API returns no data.

    :param instId: instrument id (trading pair)
    :param bar: candlestick period
    """
    # Column layout of the output CSV
    fieldnames = ["timestamp", "open", "high", "low", "close", "volume"]
    filename = getFilename(instId,bar)  # target CSV file

    with open(filename, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        # Write the header only on first write (file is empty)
        if file.tell() == 0:
            writer.writeheader()

        ts = 0   # pagination cursor: oldest timestamp fetched so far
        num = 0  # request counter, used for rate limiting
        while True:
            data = getDataApi(instId, bar, ts)
            num += 1
            if num % 10 == 0:
                # Pause to stay under the API request-rate limit
                time.sleep(1)
            if not data:
                # No more data (or the request failed) -> download finished
                break
            for row in data:
                # Keep only the fields we store
                writer.writerow({
                    "timestamp": row[0],
                    "open": row[1],
                    "high": row[2],
                    "low": row[3],
                    "close": row[4],
                    "volume": row[5],
                })
            # Rows come newest-first, so the last row is the oldest; use it
            # as the cursor for the next (older) page.
            ts = data[-1][0]
            formatted_date = timeToDate(data[0][0])
            print("当前全量任务进度:",instId, formatted_date)
    # Report the actual output path (the original f-string had no placeholder)
    print(f"K线数据已保存到 {filename}")

# 增量下载数据 每15分钟 请求一次。每次100条记录。
# Incremental download: meant to run periodically (~every 15 minutes), ~100 records per request.
def save_kline_data_to_csv2(instId,bar='15m'):
    """
    Append candles newer than the last stored timestamp to the CSV file.

    Pages backwards from the most recent data and stops once a page overlaps
    already-stored timestamps. Overlapping rows ARE still written; duplicates
    are removed later by sort_csv_by_timestamp.

    :param instId: instrument id (trading pair)
    :param bar: candlestick period
    """
    # Column layout of the output CSV
    fieldnames = ["timestamp", "open", "high", "low", "close", "volume"]
    filename = getFilename(instId,bar)  # target CSV file

    # Newest timestamp already on disk; anything at or below it is a duplicate
    last_timestamp = get_last_timestamp(filename)

    with open(filename, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        # Write the header only on first write (file is empty)
        if file.tell() == 0:
            writer.writeheader()

        ts = 0              # pagination cursor: oldest timestamp fetched so far
        num = 0             # request counter, used for rate limiting
        reached_known = False  # set once we see a timestamp we already stored
        while True:
            data = getDataApi(instId, bar, ts)
            num += 1
            if num % 10 == 0:
                # Pause to stay under the API request-rate limit
                time.sleep(1)
            if not data:
                # No more data (or the request failed) -> download finished
                break
            # For incremental storage, write the page in chronological order
            reversed_data = data[::-1]
            for row in reversed_data:
                selected_row = {
                    "timestamp": row[0],
                    "open": row[1],
                    "high": row[2],
                    "low": row[3],
                    "close": row[4],
                    "volume": row[5]
                }
                # Already-stored timestamp: flag it so we stop after this page.
                # The row is still written; dedup happens in sort_csv_by_timestamp.
                if int(row[0]) <= last_timestamp:
                    reached_known = True
                writer.writerow(selected_row)
            # Rows come newest-first from the API, so data[-1] is the oldest;
            # use it as the cursor for the next (older) page.
            ts = data[-1][0]
            formatted_date = timeToDate(data[0][0])
            print("当前增量任务进度:",instId, formatted_date)
            if reached_known:
                break
    # Report the actual output path (the original f-string had no placeholder)
    print(f"K线数据已保存到 {filename}")

def runAll(bar='15m'):
    """Refresh candle data for every configured instrument code."""
    for code in config_down_instId:
        instId = f'{code}-USDT-SWAP'
        filePath = getFilename(instId,bar)
        if os.path.exists(filePath):
            # File already on disk: fetch only the new candles.
            print('开始增量拉取数据：'+instId)
            save_kline_data_to_csv2(instId,bar)
        else:
            # No file yet: do the initial full download.
            print('开始全量拉取数据：'+instId)
            save_kline_data_to_csv(instId,bar)
        # If the download produced a file, sort and deduplicate it.
        if os.path.exists(filePath):
            sort_csv_by_timestamp(instId,bar)
        time.sleep(1)


# Only 15m candles are needed for now; other timeframes can be derived later.
# NOTE(review): this runs at import time — consider wrapping in an
# `if __name__ == "__main__":` guard so importing this module has no side effects.
runAll()