import datetime
import os
import re
import openpyxl
import pandas as pd
import csv
from collections import OrderedDict
import Defines

# Directory containing this script; all data/log paths below are resolved against it.
currentpath = os.path.dirname(os.path.abspath(__file__))


# Convert Lingyao protocol coverage.
def lingyao_base():
    """Filter the '旧的协议覆盖' sheet to rows whose '协议名称' starts with 'CS'.

    Reads dataneed/灵妖验收.xlsx next to this script and writes the surviving
    rows to temp_new_file.xlsx (in the current working directory, as before).
    """
    base_excel = os.path.join(currentpath, r'dataneed\灵妖验收.xlsx')
    df = pd.read_excel(base_excel, sheet_name='旧的协议覆盖')
    # Vectorised keep-mask instead of collecting row indices and drop()-ing
    # them one pass later; result is identical (non-str cells are removed too).
    keep = df['协议名称'].apply(lambda v: isinstance(v, str) and v.startswith('CS'))
    df[keep].to_excel('temp_new_file.xlsx', index=False)


def write_csvlog(datadict: dict, logclass: str):
    """Dump `datadict` to `<logclass>_<timestamp>.csv` next to this script.

    Each key/value pair becomes one two-column CSV row, in dict insertion
    order.  Written as UTF-8 for consistency with the other CSV writers in
    this module (previously used the platform default encoding).
    """
    now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    # Renamed from `currentpath` to avoid shadowing the module-level global.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    csv_filepath = os.path.join(script_dir, f"{logclass}_{now}.csv")
    print('生成在如下路径: ', csv_filepath)
    with open(csv_filepath, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        for key, value in datadict.items():
            writer.writerow([key, value])


# Convert a Xiyou-bot log file into CSV.
def log_to_csv_withoutTotal(file_path):
    """Extract the last INFO/UserResponse table from a bot log and write it as CSV.

    The log is expected to contain an 'INFO' line directly followed by a
    'UserResponse' line, then a '|'-separated table headed by 'Name' and
    terminated by a dashes-only separator line (no 'Total' summary row).

    Returns the path of the CSV written next to the input file, or None when
    no table could be located.
    """
    print("开始处理文件:", file_path)

    with open(file_path, 'r', encoding='utf-8') as log_file:
        lines = log_file.readlines()

    # Step 1: locate the LAST 'INFO' line whose successor mentions
    # 'UserResponse'.  Iterate only to len(lines) - 1 so lines[i + 1] cannot
    # raise IndexError when the final line itself contains 'INFO' (the
    # original range(len(lines)) loop crashed on such logs).
    last_info_index = None
    for i in range(len(lines) - 1):
        if "INFO" in lines[i] and "UserResponse" in lines[i + 1]:
            last_info_index = i

    if last_info_index is None:
        print("未找到有效的 INFO 行")
        return

    # Step 2: from that INFO line onwards, find the first table header line
    # (contains both 'Name' and a '|' column separator).
    header_line = None
    start_idx = None
    for i, line in enumerate(lines[last_info_index:], start=last_info_index):
        if "Name" in line and "|" in line:
            header_line = line.strip()
            start_idx = i
            break

    if not header_line:
        print("未能识别到表头行")
        return

    # Step 3: split the header into column names.
    headers = [col.strip() for col in header_line.split('|') if col.strip()]

    # Step 4: collect data rows until the dashes-only terminator line.
    table_data = []
    for line in lines[start_idx + 2:]:  # skip the header and its underline
        stripped = line.strip()
        if stripped and set(stripped) == {"-"}:
            break  # a line made up solely of '-' ends the table
        if "|" in line:
            row = [col.strip() for col in line.split('|') if col.strip()]
            if row:
                table_data.append(row)

    # Step 5: write the CSV next to the source log, same base name.
    filename = f"{os.path.splitext(file_path)[0]}.csv"
    print("正在写入文件：", filename)
    with open(filename, 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(headers)
        writer.writerows(table_data)
    return filename
def log_to_csv_withTotal():
    """Split the bot elapse log into per-table CSV files (tables end in a 'Total' row).

    A table starts at a '|'-separated line beginning with 'Name' and ends at
    the next line beginning with 'Total' (inclusive).  Each table is written
    to its own bot_<timestamp>_<n>.csv: the original code wrote every table
    to bot_<timestamp>.csv with a minute-resolution timestamp, so all tables
    from one run overwrote each other and only the last survived.
    """
    base_file = os.path.join(currentpath, r'tempbot\elapse-2024-08-30.log')
    # Explicit utf-8 to match the sibling reader log_to_csv_withoutTotal.
    with open(base_file, 'r', encoding='utf-8') as log_file:
        log_lines = log_file.readlines()

    tables = []
    start_line = None
    header = None
    for i, line in enumerate(log_lines):
        if '|' not in line:
            continue
        row_data = line.strip()
        if row_data.startswith('Name'):
            header = [col.strip() for col in row_data.split('|') if col.strip()]
            start_line = i
        if row_data.startswith('Total') and header:
            # Slice includes the 'Name' header row through the 'Total' row.
            tables.append((header, log_lines[start_line:i + 1]))
            header = None

    now = datetime.datetime.now().strftime("%Y%m%d%H%M")
    # `table_lines` was previously called `lines`, shadowing the outer list.
    for table_no, (header, table_lines) in enumerate(tables):
        data = []
        for line in table_lines:
            if '|' in line:
                row_data = line.strip()
                data.append([col.strip() for col in row_data.split('|') if col.strip()])
        with open(f"bot_{now}_{table_no}.csv", 'w', newline='', encoding='utf-8') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerows(data)

# Convert Xiyou protocol coverage.
def xiyou_base():
    """Collect protocol names from column C of sheet 'locust协议覆盖'.

    Each non-empty column-C cell holds newline-separated protocol names
    (optionally with trailing '//' comments, which are stripped).  The first
    occurrence of every name is mapped to the same row's column-B value, and
    the resulting mapping is written out via write_csvlog.
    """
    workbook_path = os.path.join(currentpath, r'dataneed\西游压测工具验收.xlsx')
    workbook = openpyxl.load_workbook(workbook_path)
    sheet = workbook['locust协议覆盖']

    protocol_map = OrderedDict()
    for row in sheet.iter_rows():
        owner_cell, proto_cell = row[1], row[2]
        if not proto_cell.value:
            continue
        for raw in str(proto_cell.value).strip().split('\n'):
            # Keep only the text before any '//' comment marker.
            name = raw.split('//', 1)[0].strip() if '//' in raw else raw.strip()
            # setdefault: the first row to mention a protocol wins.
            protocol_map.setdefault(name, owner_cell.value)
    write_csvlog(protocol_map, 'botbase')


# Convert protocol names in the Xiyou locust CSV log.
def locust_csv():
    """Normalise the 'Name' column of tempbot/requests_xiyou.csv in place.

    'ACS...' names have the 'ACS' prefix rewritten to 'A'; other 'A<name>'
    entries are replaced with 'A<id>' via the Defines.ID_DEFINE lookup when
    the lookup succeeds, and kept as-is otherwise.
    """
    file_csv = os.path.join(currentpath, r'tempbot\requests_xiyou.csv')
    df = pd.read_csv(file_csv)

    def process_name(name):
        name = str(name)
        if name.startswith('ACS'):
            return name.replace('ACS', 'A')
        if name.startswith('A'):
            try:
                # (The old `name.replace(name, idname)` was just `idname`.)
                return 'A' + str(Defines.ID_DEFINE(name[1:]))
            except Exception:
                # Unknown id: keep the raw name rather than aborting the
                # whole conversion.  Narrowed from a bare `except:` so
                # SystemExit/KeyboardInterrupt are no longer swallowed.
                return name
        return name

    df['Name'] = df['Name'].apply(process_name)
    df.to_csv(file_csv, index=False)


# Convert Xiyou error codes.
def xyerrcode_csv():
    """Extract `public static final int NAME = CODE;` entries from sheet '错误码信息'.

    Column A of the sheet holds Java-style constant declarations; the numeric
    code (right of '=') is mapped to the constant name (left of '=') and the
    mapping is written to output.csv in the current working directory.
    """
    base_file = os.path.join(currentpath, r'dataneed\西游压测工具验收.xlsx')
    wb = openpyxl.load_workbook(base_file)
    ws = wb['错误码信息']
    error_dict = {}
    for text in (cell.value for cell in ws['A']):
        if isinstance(text, str) and 'public static final int' in text:
            # split('=', 1): a stray second '=' in the value no longer raises
            # ValueError during tuple unpacking.
            name_part, value_part = text.split('=', 1)
            const_name = name_part.replace('public static final int', '').strip()
            const_value = value_part.strip().strip(';').strip()
            error_dict[const_value] = const_name

    # NOTE(review): the column headers label the key column '错误码id' and the
    # value column '错误码描述', matching the {code: name} layout built above.
    df = pd.DataFrame(list(error_dict.items()), columns=['错误码id', '错误码描述'])
    df.to_csv('output.csv', index=False)


def excel_csv():
    """Convert tempbot/组队.xlsx (next to this script) to 混压.csv encoded as UTF-8."""
    excel_file = r'tempbot\组队.xlsx'
    # Resolve against the script directory like every other helper in this
    # module; the old code silently depended on the current working directory.
    df = pd.read_excel(os.path.join(currentpath, excel_file))
    csv_file = '混压.csv'
    df.to_csv(csv_file, index=False, encoding='utf-8')
    print(f'Excel 文件 {excel_file} 已成功转换为 CSV 文件 {csv_file}，使用 UTF-8 编码。')

# excel_csv()
# lingyao_base()
# bot_log()
# xiyou_base()
# locust_csv()
# xyerrcode_csv()