# coding: utf-8

import csv
import json
import os.path
import re
from celery_app import app
from lib.cache.redis_cache import redis_instance
from settings import ROW_LIMIT, outDir


@app.task
def write_data_to_file():
    """Drain queued rows from Redis and flush them into the CSV output files.

    TODO:
        - batch writing
    """
    # Grab a batch of pending rows from the queue.
    pending = redis_instance.lrange('wait_write_data', 0, 100)
    if not pending:
        # Nothing queued — bail out.
        return
    # Remove the fetched rows from the queue right away.
    # NOTE(review): lrem(count=0) removes ALL occurrences of each value —
    # presumably queued rows are unique; verify against the producer.
    for item in pending:
        redis_instance.lrem('wait_write_data', 0, item)
    # Write the batch into the current output part file.
    current_name = redis_instance.get('edge_events_file') or 'edge_events_part1.csv'
    last_path = execute_write_file(pending, current_name)
    # Remember which file the write ended up in for the next run.
    redis_instance.set('edge_events_file', last_path)

def execute_write_file(datas, file_name):
    """Write JSON-encoded rows into CSV part files, rolling over when full.

    Each part file holds at most ROW_LIMIT data rows plus one header row.
    Rows that do not fit into *file_name* spill over into the following
    ``edge_events_part{n}.csv`` files (n auto-increments).

    :param datas: list of JSON strings, each decoding to one CSV row dict
    :param file_name: current output file name, e.g. ``edge_events_part1.csv``
    :return: path of the last file written to (cached by the caller)
    :raises ValueError: when *file_name* carries no part number
    """
    fieldnames = [
        'tx_hash', 'log_index', 'block_number', 'timestamp',
        'token_address', 'raw_amount', 'usd_value', 'from', 'to'
    ]
    file_path = os.path.join(outDir, file_name)
    # 'a+' creates the file when missing and appends otherwise.
    with open(file_path, mode='a+', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        csvfile.seek(0)  # rewind so the existing rows can be counted
        line_num = sum(1 for _ in csvfile)
        if line_num == 0:
            # Brand-new (or empty) file: emit the header row first.
            writer.writeheader()
            available_num = ROW_LIMIT
        else:
            available_num = ROW_LIMIT - (line_num - 1)  # exclude the header row
        if available_num > 0:
            # Append as many rows as the current file can still take.
            for data in datas[:available_num]:
                writer.writerow(json.loads(data))
    # '>=' (not '>'): when the batch exactly fills the file there is nothing
    # left to spill, so don't create an empty next part.
    if available_num >= len(datas):
        return file_path
    # Spill the remainder into the next part file(s).  Clamp a negative
    # available_num (file already over the limit) to 0 — a negative slice
    # start would otherwise take rows from the END of the list.
    available_data = datas[max(available_num, 0):]
    match = re.search(r'\d+', file_name)
    if match is None:
        # The configured file name has no part number to increment.
        err = f'初始输出文件命令错误, file_name: {file_name}'
        print(err)
        raise ValueError(err)
    index = int(match.group())
    for step in range(1, 99999):
        next_path, available_num = find_file_name(index + step)
        if available_num >= len(available_data):
            writer_csv(available_data, next_path)
            return next_path
        # Fill this part completely and carry the rest forward.
        writer_csv(available_data[:available_num], next_path)
        available_data = available_data[available_num:]

def find_file_name(n):
    """Find, starting at part number *n*, the first part file with spare
    capacity; create a fresh one as soon as the next part does not exist.

    :param n: part number to start probing from
    :return: tuple ``(file_path, available_num)`` — a writable path and the
        number of data rows it can still accept
    """
    for part in range(n, 99999):
        file_path = os.path.join(outDir, f'edge_events_part{part}.csv')
        if not os.path.isfile(file_path):
            # Next part does not exist yet: create it with just a header row.
            return open_new_part(file_path), ROW_LIMIT
        with open(file_path, 'r') as f:
            line_num = sum(1 for _ in f)
        # Exclude the header row, consistent with execute_write_file's
        # capacity accounting.
        available_num = ROW_LIMIT - (line_num - 1)
        if available_num > 0:
            return file_path, available_num
        # This part is already full — probe the next one.

def open_new_part(file_path):
    """Create a brand-new CSV part file containing only the header row.

    :param file_path: absolute path of the part file to create (truncated
        if it somehow already exists)
    :return: the same *file_path*, for caller convenience
    """
    header = [
        'tx_hash', 'log_index', 'block_number', 'timestamp',
        'token_address', 'raw_amount', 'usd_value', 'from', 'to'
    ]
    with open(file_path, mode='w', newline='') as fh:
        # A plain writer row of the field names is byte-identical to
        # DictWriter.writeheader().
        csv.writer(fh).writerow(header)
    return file_path


def writer_csv(datas, file_path):
    """Append JSON-encoded rows to an existing CSV part file.

    :param datas: iterable of JSON strings, each decoding to one row dict;
        an empty/falsy value is a no-op
    :param file_path: path of the CSV file to append to (header assumed
        to be present already)
    """
    if not datas:
        return
    fields = [
        'tx_hash', 'log_index', 'block_number', 'timestamp',
        'token_address', 'raw_amount', 'usd_value', 'from', 'to'
    ]
    with open(file_path, mode='a', newline='') as out:
        # writerows() consumes the generator lazily — one decoded row at a time.
        csv.DictWriter(out, fieldnames=fields).writerows(
            json.loads(item) for item in datas
        )


if __name__ == '__main__':
    # Run the task once synchronously (bypassing the Celery worker) for
    # manual/local testing; @app.task-decorated functions stay directly callable.
    write_data_to_file()