import pandas as pd
import argparse
import os
from rich.console import Console
from rich.table import Table
import matplotlib.pyplot as plt
import pymysql
import cfg

# Directory containing this script; used later to resolve bare YYYYMMDD
# arguments against output/result/<date>/.
base_dir = os.path.dirname(os.path.abspath(__file__))

# python post_processing_v2.py --input_csvs 20241012 20241013 20241015 20241016 20241018 --output_dir ./output/postprocess/test



# Module-global MySQL connection, opened at import time with credentials
# from cfg.db and closed explicitly at the end of the script.
db = pymysql.connect(
    host=cfg.db["host"],
    port=cfg.db["port"],
    user=cfg.db["user"],
    password=cfg.db["password"],
    database=cfg.db["database"]
)
cursor = db.cursor()


# Validate a --input_csvs argument for argparse.
def check_file_extension(value):
    """Accept either an 8-digit YYYYMMDD string or a path ending in .csv.

    Returns the value unchanged when valid; raises
    argparse.ArgumentTypeError otherwise.
    """
    # Bare date strings (e.g. "20241012") are resolved to folders later.
    if len(value) == 8 and value.isdigit():
        return value
    extension = os.path.splitext(value)[1].lower()
    if extension != '.csv':
        raise argparse.ArgumentTypeError(f"File must have a .csv extension, but got '{extension}'")
    return value

# Extract the session date from a result file's parent folder name.
def extract_date_from_filename(file_path):
    """Return the date encoded in the file's parent folder as a Timestamp.

    The folder layout is output/result/<YYYYMMDD>/<file>.csv, e.g.
    'output/result/20241012/evaluation_results.csv' -> Timestamp('2024-10-12').

    Raises ValueError (via pandas) if the folder name is not a valid
    YYYYMMDD date.
    """
    # Use os.path.basename instead of splitting on '/': the original
    # split broke on Windows-style separators and on any platform where
    # os.sep differs from '/'.
    folder_name = os.path.basename(os.path.dirname(file_path))
    return pd.to_datetime(folder_name, format='%Y%m%d')

def find_largest_csv_file(fold_path):
    """Return the path of the biggest non-empty .csv file in *fold_path*.

    Returns '' when the folder contains no .csv file larger than 0 bytes
    (the strict > keeps zero-byte files from being selected).
    """
    best_path = ''
    best_size = 0
    for entry in os.listdir(fold_path):
        if not entry.endswith('.csv'):
            continue
        candidate = os.path.join(fold_path, entry)
        size = os.path.getsize(candidate)
        if size > best_size:
            best_path, best_size = candidate, size
    return best_path

# Command-line argument setup
parser = argparse.ArgumentParser(description="Process and analyze multiple CSV files.")
# Each entry is either an explicit .csv path or a bare YYYYMMDD string
# (resolved below against output/result/<date>/).
parser.add_argument('--input_csvs', required=True, nargs='+', type=check_file_extension, help='Paths to input CSV files.')
parser.add_argument('--output_dir', required=True, help='Directory to save the summary and plots.')
args = parser.parse_args()

# Create the output directory if it does not already exist
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)

console = Console()

# Accumulates every per-file DataFrame for the cross-date summary at the end
combined_df = pd.DataFrame()

# For each CSV file: enrich rows with caller/callee/url from the DB,
# add a Date column, export per-date xlsx, and print a summary table.
for file_path in args.input_csvs:
    # A bare YYYYMMDD argument names a folder under output/result;
    # use the largest CSV file found inside it.
    if "/" not in file_path:
        fold_path = os.path.join(base_dir, "output/result", file_path)
        # find the largest csv file in fold_path
        file_path = find_largest_csv_file(fold_path)
    print(f"Processing file: {file_path}")
    df = pd.read_csv(file_path)
    df['ID'] = df['ID'].astype(str)
    # Placeholder columns, filled from the DB lookups below
    df[['caller_num', 'callee_num', 'url']] = None
    # The first 14 chars of ID appear to encode a start time as
    # YYYYMMDDHHMMSS — that is what the reformatting below assumes.
    begintimes = df['ID'].str[:14].unique()
    # NOTE(review): values are interpolated directly into the SQL string.
    # They come from the local CSV rather than end users, but a
    # parameterized IN clause would still be safer.
    begintime_str = ','.join([f"'{bt[:4]}-{bt[4:6]}-{bt[6:8]} {bt[8:10]}:{bt[10:12]}:{bt[12:]}'" for bt in begintimes])
    query1 = f"SELECT begintime, customer_uuid, record_file_name FROM cti_record WHERE begintime IN ({begintime_str});"
    cursor.execute(query1)
    results1 = cursor.fetchall()
    # begintime (as string) -> (customer_uuid, record_file_name)
    begintime_map = {str(row[0]): (row[1], row[2]) for row in results1}
    customer_uuids = [row[1] for row in results1 if row[1] is not None]
    customer_uuid_str = ','.join([f"'{uuid}'" for uuid in customer_uuids])

    if customer_uuid_str:
        query2 = f"SELECT call_uuid, caller_num, callee_num FROM cti_cdr_call WHERE call_uuid IN ({customer_uuid_str});"
        cursor.execute(query2)
        results2 = cursor.fetchall()
        # call_uuid -> (caller_num, callee_num)
        customer_uuid_map = {row[0]: (row[1], row[2]) for row in results2}
    else:
        customer_uuid_map = {}
        print("未找到任何有效的 customer_uuid，跳过相关查询。")

    # Fill caller/callee/url per row by matching its begintime against the maps
    for index, row in df.iterrows():
        begintime = row['ID'][:14]
        formatted_begintime = f"{begintime[:4]}-{begintime[4:6]}-{begintime[6:8]} {begintime[8:10]}:{begintime[10:12]}:{begintime[12:]}"
        if formatted_begintime in begintime_map:
            customer_uuid, record_file_name = begintime_map[formatted_begintime]
            if customer_uuid in customer_uuid_map:
                caller_num, callee_num = customer_uuid_map[customer_uuid]
                df.at[index, 'caller_num'] = caller_num
                df.at[index, 'callee_num'] = callee_num
                # Recording playback URL; host is hard-coded
                df.at[index, 'url'] = f"http://116.62.120.233{record_file_name}"
            else:
                print(f"未找到 customer_uuid {customer_uuid} 对应的 caller_num 和 callee_num。")
        else:
            print(f"未找到 begintime {formatted_begintime} 对应的 customer_uuid 和 record_file_name。")

    # Extract the date from the file's parent folder name
    date = extract_date_from_filename(file_path)
    df['Date'] = date
    date_str = date.strftime('%Y-%m-%d')
    # Per-date enriched export
    output_file_path = f"{args.output_dir}/{date_str}.xlsx"
    df.to_excel(output_file_path, index=False)
    
    console.print(f"Processing file: {file_path}")
    
    # Indicator columns: name contains '关键' (key) but is neither an ID
    # nor a '原因' (reason) column.
    # NOTE(review): this variable is also read AFTER the loop (combined
    # summary), so the last file's columns win — see note there.
    filtered_columns = [col for col in df.columns if 'ID' not in col and '原因' not in col and '关键' in col]
    
    # Per-file value-count summary rendered as a rich table
    summary_table = Table(title=f"Summary for {os.path.basename(file_path)}", show_header=True)
    summary_table.add_column("Category", justify="left", style="cyan", width=20)
    summary_table.add_column("Value", justify="left", style="cyan", width=45)
    summary_table.add_column("Count", justify="right", style="magenta", width=10)
    summary_table.add_column("Percentage", justify="right", style="green", width=15)

    summary_data = {}
    for col in filtered_columns:
        value_counts = df[col].value_counts()
        percentages = value_counts / value_counts.sum() * 100
        summary_data[col] = (value_counts, percentages)
        
        # Show the category name only on the first row of each group
        for i, (value, count) in enumerate(value_counts.items()):
            percentage = '{:.2f}%'.format(percentages[value])
            if i == 0:
                summary_table.add_row(col, str(value), str(count), percentage)
            else:
                summary_table.add_row("", str(value), str(count), percentage)
    
    console.print(summary_table)

    # Append this file's rows to the cross-file DataFrame
    combined_df = pd.concat([combined_df, df], ignore_index=True)

# Aggregate statistics across all CSV files, grouped by date
console.print("Processing combined data by date...")

# NOTE(review): filtered_columns leaks out of the per-file loop above, so
# this uses the LAST file's indicator columns — assumes every input CSV
# shares the same columns; confirm.
# Per date, compute each indicator's value distribution in percent.
average_df_by_date = combined_df.groupby('Date').apply(
    lambda group: group[filtered_columns].apply(
        lambda col: col.value_counts(normalize=True) * 100
    )
)

# Flatten the multi-level index into tidy columns.
# NOTE(review): after stack() the index levels appear to be
# (Date, answer value, indicator name), which would mean the labels
# 'Indicator' and 'Value' below are swapped relative to their contents —
# verify against actual output before relying on these names.
average_df_by_date = average_df_by_date.stack().reset_index()
average_df_by_date.columns = ['Date', 'Indicator', 'Value', 'Percentage']

# Truncate at the first parenthesis, handling both ASCII '(' and
# full-width '（'
average_df_by_date['Indicator'] = average_df_by_date['Indicator'].str.split('(').str[0].str.split('（').str[0]

# Sort by Date then Value for stable, readable output
average_df_by_date_sorted = average_df_by_date.sort_values(by=['Date', 'Value'])

# Save the per-date summary as CSV
average_file_by_date = os.path.join(args.output_dir, "average_summary_by_date.csv")
average_df_by_date_sorted.to_csv(average_file_by_date, index=False)
console.print(f"Average percentages by date saved to {average_file_by_date}")

# Also save an xlsx copy
average_file_by_date_xlsx = os.path.join(args.output_dir, "average_summary_by_date.xlsx")
average_df_by_date_sorted.to_excel(average_file_by_date_xlsx, index=False)
console.print(f"Average percentages by date saved to {average_file_by_date_xlsx}")

cursor.close()
db.close()
print("数据库连接已关闭")