import argparse  
import pandas as pd
import os
import time
import subprocess
import requests
import random
import numpy as np
from datetime import datetime, timedelta
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client.client.query_api import QueryApi
import paramiko

# Parse command-line arguments
def parse_args(argv=None):
    """Parse the command-line interface of the InfluxDB performance-test tool.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:] — so existing callers
            are unaffected, while tests can pass an explicit list.

    Returns:
        argparse.Namespace with attributes: start_time, url, token, org,
        bucket, csv_file, influx_data_dir.
    """
    parser = argparse.ArgumentParser(description='InfluxDB 性能测试工具')
    parser.add_argument('--start-time', 
                        type=str, 
                        default='1980-01-01 00:00:00',
                        help='数据起始时间，格式: yyyy-mm-dd 00:00:00 (默认: 1980-01-01 00:00:00)')
    parser.add_argument('--url', 
                        type=str, 
                        required=True,
                        help='InfluxDB 服务器地址和端口 (例如: http://localhost:8086)')
    parser.add_argument('--token', 
                        type=str, 
                        required=True,
                        help='InfluxDB 访问令牌')
    parser.add_argument('--org', 
                        type=str, 
                        required=True,
                        help='InfluxDB 组织名称')
    parser.add_argument('--bucket', 
                        type=str, 
                        default='mydb',
                        help='InfluxDB 存储桶名称 (默认: mydb)')
    parser.add_argument('--csv-file', 
                        type=str, 
                        required=True,
                        help='输入的 CSV 文件路径')
    parser.add_argument('--influx-data-dir', 
                        type=str, 
                        default='/var/lib/influxdb/.influxdbv2/engine/data',
                        help='InfluxDB 数据目录 (默认: /var/lib/influxdb/.influxdbv2/engine/data)')
    return parser.parse_args(argv)

# Main entry point
def main():
    """Program entry point.

    Parses the CLI, loads the CSV data set, connects to InfluxDB, then
    (further below) loops forever inserting sensor data while watching the
    bucket's disk usage and running performance tests at capacity thresholds.
    """
    args = parse_args()

    # Convert the command-line start time into a datetime object.
    try:
        # Parse the expected yyyy-mm-dd HH:MM:SS format.
        start_time = datetime.strptime(args.start_time, '%Y-%m-%d %H:%M:%S')
    except ValueError as e:
        print(f"无效的时间格式: {args.start_time}，请使用 yyyy-mm-dd 00:00:00 格式")
        print(f"示例: 2023-01-01 12:30:00")
        exit(1)

    # Load the CSV data set.
    try:
        data = pd.read_csv(args.csv_file)
        print(f"成功读取 CSV 文件: {args.csv_file}")
    except Exception as e:
        print(f"读取 CSV 文件失败: {e}")
        exit(1)

    # Every CSV column name doubles as an InfluxDB field name.
    all_fields = list(data.columns)

    # InfluxDB connection settings.
    url = args.url
    token = args.token
    org = args.org
    bucket = args.bucket

    # InfluxDB on-disk data directory (used for du-based size checks).
    influx_data_dir = args.influx_data_dir

    # Create the InfluxDB client plus write/query APIs.
    client = InfluxDBClient(url=url, token=token, org=org)
    write_api = client.write_api(write_options=SYNCHRONOUS)
    query_api = client.query_api()

    # Simulate 10 sensors.
    num_sensors = 10

    # Timestamp (epoch seconds) of the last storage-usage check.
    last_storage_check_time = time.time()

    # Thresholds (GB) whose performance tests have already been run.
    completed_tests = set()

    # Disk-capacity thresholds (GB) mapped to the number of test iterations.
    capacity_thresholds = {
        0.2: 100,  # run 100 operations at 200 MB
        1: 100,
        5: 100,
        10: 100,   # run 100 operations at 10 GB
        15: 100,
        25: 100,
        40: 100,
        50: 100,   # run 100 operations at 50 GB
        60: 100,
        70: 100,
        80: 100,
        90: 100,
        100: 100   # run 100 operations at 100 GB
    }

    # Output directory for performance-test result files.
    performance_output_dir = "performance_results"
    os.makedirs(performance_output_dir, exist_ok=True)

    # Set to True once the final (100GB) test has completed.
    should_exit = False

    # Number of completed CSV import rounds.
    csv_import_round = 0
    def get_bucket_id():
        """Look up the ID of the configured bucket via the InfluxDB HTTP API.

        Returns the bucket ID string, or None if the bucket is missing or
        the request fails (a message is printed in either case).
        """
        auth_headers = {
            "Authorization": f"Token {token}",
            "Content-Type": "application/json"
        }
        response = requests.get(f"{url}/api/v2/buckets?org={org}", headers=auth_headers)

        # Guard clause: bail out early on any non-OK HTTP status.
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return None

        for entry in response.json().get('buckets', []):
            if entry['name'] == bucket:
                return entry['id']
        print(f"未找到名为 {bucket} 的 bucket")
        return None


    def get_bucket_disk_usage(bucket_id):
        """Return the bucket directory's on-disk size in GB via local `du`.

        Returns None (after printing a message) when the bucket_id is empty,
        the directory does not exist, or the du command fails.
        """
        if not bucket_id:
            return None

        bucket_path = os.path.join(influx_data_dir, bucket_id)
        if not os.path.exists(bucket_path):
            print(f"目录不存在: {bucket_path}")
            return None

        try:
            # `du -sb` prints the total size in bytes as the first token.
            du_result = subprocess.run(
                ["du", "-sb", bucket_path],
                capture_output=True,
                text=True,
                check=True
            )
            byte_count = int(du_result.stdout.strip().split()[0])
        except subprocess.CalledProcessError as e:
            print(f"执行命令失败: {e.stderr}")
            return None
        except Exception as e:
            print(f"获取磁盘使用情况时出错: {e}")
            return None
        return byte_count / (1024 ** 3)  # bytes -> GB

    def get_bucket_disk_usage_ssh(bucket_id):
        """Get the bucket directory's on-disk size in GB by running `du` over SSH.

        Args:
            bucket_id: InfluxDB bucket ID (directory name under the remote
                engine data directory).

        Returns:
            Size in GB as a float, or None if the lookup fails (a message is
            printed on failure).
        """
        if not bucket_id:
            return None

        # Bugfix: bind ssh before the try block so the finally clause cannot
        # raise NameError if SSHClient construction itself fails.
        ssh = None
        try:
            # Create the SSH client.
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

            # Connect to the server.
            # SECURITY: credentials are hard-coded; move them to configuration
            # or switch to key-based auth before use outside a test lab.
            ssh.connect(
                hostname="172.18.3.24",
                port=22,
                username="user",
                password="user"  # or use the key_file parameter
            )

            # Run du remotely; stderr is discarded so a missing directory
            # simply yields empty output.
            bucket_path = f"/var/lib/influxdb/.influxdbv2/engine/data/{bucket_id}"
            cmd = f"du -sb {bucket_path} 2>/dev/null"
            stdin, stdout, stderr = ssh.exec_command(cmd)

            # Collect the command's output streams.
            output = stdout.read().decode().strip()
            error = stderr.read().decode().strip()

            if output:
                size_bytes = int(output.split()[0])
                return size_bytes / (1024 ** 3)  # convert to GB
            else:
                print(f"SSH命令执行失败: {error}")
                return None

        except Exception as e:
            print(f"SSH连接失败: {e}")
            return None
        finally:
            if ssh:
                ssh.close()

    def create_full_weather_point(sensor_id, timestamp, row=None):
        """Build a weather Point carrying every known field for one reading.

        With a CSV row, copies over each column that parses as a float;
        without one, synthesizes a random value for every known field.
        """
        point = Point("weather_measurement").tag("sensor_id", f'{sensor_id:03}').time(timestamp)

        if row is not None:
            # Real data: keep only the columns that convert cleanly to float.
            for field in all_fields:
                try:
                    point.field(field, float(row[field]))
                except (ValueError, TypeError):
                    continue  # non-numeric column, skip it
            return point

        # No row supplied: generate a plausible random value per field.
        random_ranges = {
            "p (mbar)": (900, 1100),
            "T (degC)": (-20, 40),
            "Tpot (K)": (250, 320),
            "Tdew (degC)": (-20, 30),
            "rh (%)": (0, 100),
            "VPmax (mbar)": (0, 50),
            "VPact (mbar)": (0, 50),
            "VPdef (mbar)": (0, 50),
            "sh (g/kg)": (0, 30),
            "H2OC (mmol/mol)": (0, 50),
            "rho (g/m**3)": (1, 2),
            "wv (m/s)": (0, 50),
            "max. wv (m/s)": (0, 80),
            "wd (deg)": (0, 360),
            "rain (mm)": (0, 50),
            "raining (s)": (0, 3600),
            "SWDR (W/m)": (0, 1500),
            "PAR (ol/m/s)": (0, 2000),
            "max. PAR (ol/m/s)": (0, 3000),
            "Tlog (degC)": (-20, 40),
            "OT": (400, 500),
        }
        for field_name, (low, high) in random_ranges.items():
            point.field(field_name, random.uniform(low, high))

        return point


    def run_performance_test(threshold_gb, test_count, insert_test_count=1000, batch_size=10):
        """执行全面性能测试，包括插入、查询、修改和删除"""
        nonlocal should_exit, csv_import_round  # 声明使用外部变量
        
        print(f"\n=== 开始磁盘容量达到 {threshold_gb}GB 的全面性能测试 ===")
        
        # 基于 CSV 导入轮数计算时间偏移量
        if csv_import_round > 1:
            time_offset = timedelta(hours=5 * (csv_import_round - 1))
        else:
            time_offset = timedelta(0)  # 第一轮导入时不产生时间偏移
        
        # 准备测试数据
        base_start_time = start_time + time_offset
        
        # 执行查询性能测试
        print("\n=== 开始查询性能测试 ===")
        query_times = []
        for i in range(test_count):
            # 准备测试数据
            test_sensor_id = f"{random.randint(0, num_sensors - 1):03}"
            
            # 随机选择测试时间范围，并加入轮数偏移
            test_start_time = base_start_time + timedelta(hours=random.randint(0, 5))
            test_end_time = test_start_time + timedelta(minutes=10)
            
            # 转换为 InfluxQL 时间格式
            start_time_str = test_start_time.isoformat() + "Z"
            end_time_str = test_end_time.isoformat() + "Z"

            # 随机选择几个字段进行查询
            valid_fields = [field for field in all_fields if field != "date"]
            fields = random.sample(all_fields, min(5, len(valid_fields)))
            field_filter = '|> filter(fn: (r) => ' + \
                ' or '.join([f'r["_field"] == "{field}"' for field in fields]) + ')'

            query = f'''
            from(bucket: "{bucket}")
            |> range(start: time(v: "{start_time_str}"), stop: time(v: "{end_time_str}"))
            |> filter(fn: (r) => r["_measurement"] == "weather_measurement")
            |> filter(fn: (r) => r["sensor_id"] == "{test_sensor_id}")
            {field_filter}  
            |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
            '''

            start = time.time()
            try:
                result = query_api.query(query=query)
                query_times.append(time.time() - start)
                
                # 输出查询结果到文件
                query_output_file = os.path.join(performance_output_dir, f"query_result_{threshold_gb}GB_{i}.txt")
                with open(query_output_file, 'w') as f:
                    f.write(f"查询时间范围: {start_time_str} 到 {end_time_str}\n")
                    f.write(f"传感器 ID: {test_sensor_id}\n")
                    f.write(f"查询字段: {', '.join(fields)}\n\n")
                    
                    use_pivot = "pivot(" in query
                    
                    if not result:
                        f.write("查询结果为空\n")
                    else:
                        if use_pivot:
                            columns = []
                            for table in result:
                                for record in table.records:
                                    columns = list(record.values.keys())
                                    break
                                break
                            
                            f.write("时间\t" + "\t".join(columns) + "\n")
                            
                            for table in result:
                                for record in table.records:
                                    time_str = str(record.get_time())
                                    values = [str(record.values.get(col, "nan")) for col in columns]
                                    f.write(f"{time_str}\t" + "\t".join(values) + "\n")
                        else:
                            for table in result:
                                for record in table.records:
                                    f.write(f"时间: {record.get_time()}, {record.get_field()}: {record.get_value()}\n")
                
                print(f"查询 {i+1}/{test_count} 结果已保存到: {query_output_file}")
  
            except Exception as e:
                print(f"查询 {i+1}/{test_count} 出错: {e}")
                query_times.append(None)
        
        # 执行修改性能测试（InfluxDB 中修改通过重写实现）
        print("\n=== 开始修改性能测试 ===")
        update_times = []
        for i in range(test_count):
            # 准备测试数据
            test_sensor_id = random.randint(0, num_sensors - 1)
            
            # 随机选择一个时间点，并加入轮数偏移
            test_time = base_start_time + timedelta(hours=random.randint(0, 5), 
                                                  seconds=random.randint(0, 360))
            
            # 为了测试多样性，每10次使用一次真实数据
            if i % 10 == 0  and not data.empty:
                random_row = data.iloc[random.randint(0, len(data)-1)]
                point = create_full_weather_point(test_sensor_id, test_time, random_row)
            else:
                point = create_full_weather_point(test_sensor_id, test_time)
            
            start = time.time()
            try:
                write_api.write(bucket=bucket, record=point)
                update_times.append(time.time() - start)
            except Exception as e:
                print(f"修改出错: {e}")
                update_times.append(None)
        
        # 执行删除性能测试
        print("\n=== 开始删除性能测试 ===")
        delete_times = []
        for _ in range(test_count):
            # 准备测试数据
            test_sensor_id = f"{random.randint(0, num_sensors - 1):03}"
            
            # 随机选择一个小时间范围，并加入轮数偏移
            test_start_time = base_start_time + timedelta(hours=random.randint(0, 5))
            delete_start = test_start_time + timedelta(seconds=random.randint(0, 60))
            delete_end = delete_start + timedelta(seconds=random.randint(60, 600))
            
            delete_start_str = delete_start.isoformat() + "Z"
            delete_end_str = delete_end.isoformat() + "Z"
            
            start = time.time()
            try:
                delete_api = client.delete_api()
                delete_api.delete(
                    start=delete_start_str,
                    stop=delete_end_str,
                    predicate=f'_measurement="weather_measurement" AND sensor_id="{test_sensor_id}"',
                    bucket=bucket,
                    org=org
                )
                delete_times.append(time.time() - start)
            except Exception as e:
                print(f"删除出错: {e}")
                delete_times.append(None)
        
        # 计算并打印性能结果
        def print_stats(operation, times):
            valid_times = [t for t in times if t is not None]
            if not valid_times:
                print(f"{operation} 性能测试: 无有效数据")
                return
            
            avg_time = np.mean(valid_times)
            min_time = np.min(valid_times)
            max_time = np.max(valid_times)
            p95_time = np.percentile(valid_times, 95)
            
            print(f"{operation} 性能测试 ({len(valid_times)} 次有效尝试):")
            print(f"  平均时间: {avg_time:.4f} 秒")
            print(f"  最小时间: {min_time:.4f} 秒")
            print(f"  最大时间: {max_time:.4f} 秒")
            print(f"  95% 响应时间: {p95_time:.4f} 秒")
        
        print("\n=== 性能测试汇总 ===")
        print_stats("查询", query_times)
        print_stats("修改/插入", update_times)
        print_stats("删除", delete_times)
        
        # 保存综合性能测试结果到文件
        output_file = os.path.join(performance_output_dir, f"overall_result_{threshold_gb}GB.txt")
        with open(output_file, 'w') as f:
            f.write(f"全面性能测试结果 ({threshold_gb}GB):\n\n")
            
            f.write("\n查询性能:\n")
            valid_query_times = [t for t in query_times if t is not None]
            if valid_query_times:
                f.write(f"  平均时间: {np.mean(valid_query_times):.4f} 秒\n")
                f.write(f"  最小时间: {np.min(valid_query_times):.4f} 秒\n")
                f.write(f"  最大时间: {np.max(valid_query_times):.4f} 秒\n")
                f.write(f"  95% 响应时间: {np.percentile(valid_query_times, 95):.4f} 秒\n")
            else:
                f.write("  无有效数据\n")
            
            f.write("\n修改/插入性能:\n")
            valid_update_times = [t for t in update_times if t is not None]
            if valid_update_times:
                f.write(f"  平均时间: {np.mean(valid_update_times):.4f} 秒\n")
                f.write(f"  最小时间: {np.min(valid_update_times):.4f} 秒\n")
                f.write(f"  最大时间: {np.max(valid_update_times):.4f} 秒\n")
                f.write(f"  95% 响应时间: {np.percentile(valid_update_times, 95):.4f} 秒\n")
            else:
                f.write("  无有效数据\n")
            
            f.write("\n删除性能:\n")
            valid_delete_times = [t for t in delete_times if t is not None]
            if valid_delete_times:
                f.write(f"  平均时间: {np.mean(valid_delete_times):.4f} 秒\n")
                f.write(f"  最小时间: {np.min(valid_delete_times):.4f} 秒\n")
                f.write(f"  最大时间: {np.max(valid_delete_times):.4f} 秒\n")
                f.write(f"  95% 响应时间: {np.percentile(valid_delete_times, 95):.4f} 秒\n")
            else:
                f.write("  无有效数据\n")
        
        print(f"全面性能测试结果已保存到: {output_file}")
        print(f"=== {threshold_gb}GB 全面性能测试完成 ===\n")
        
        # 检查是否完成了 100GB 测试
        if threshold_gb >= 100:
            print("已完成 100GB 性能测试，将在数据插入循环结束后退出程序")
            should_exit = True


    # Insert data in an endless loop until the exit condition is met.
    try:
        while True:
            print(f"\n=== 开始第 {csv_import_round + 1} 次 CSV 数据循环插入 ===")
            
            # Walk every CSV row and write it to InfluxDB for each sensor.
            for index, row in data.iterrows():
                for sensor_id in range(num_sensors):
                    # Advance the timestamp by the row index plus a per-round
                    # offset so repeated rounds never produce duplicate points.
                    current_time = start_time + timedelta(seconds=index + (csv_import_round * len(data)))
                    
                    # Build a Point carrying every CSV field.
                    point = create_full_weather_point(sensor_id, current_time, row)
                    
                    # Write the data point.
                    write_api.write(bucket=bucket, record=point)

                # Time for another storage-usage check? (every 300 seconds)
                # NOTE(review): current_time is reused here as an epoch float
                # after holding a datetime above — consider renaming one.
                current_time = time.time()
                if current_time - last_storage_check_time >= 300:
                    bucket_id = get_bucket_id()
                    if bucket_id:
                        disk_usage_gb = get_bucket_disk_usage_ssh(bucket_id)
                        if disk_usage_gb is not None:
                            print(f"存储桶 {bucket} (ID: {bucket_id}) 的磁盘占用: {disk_usage_gb:.2f} GB")
                            
                            # Run the performance suite for every newly crossed threshold.
                            for threshold in sorted(capacity_thresholds.keys()):
                                if disk_usage_gb >= threshold and threshold not in completed_tests:
                                    run_performance_test(threshold, capacity_thresholds[threshold])
                                    completed_tests.add(threshold)
                            
                            # Exit once the final threshold test has completed.
                            if should_exit:
                                print("已满足退出条件，完成当前 CSV 循环后退出")
                                # Close the client connection before exiting.
                                client.close()
                                exit(0)
                        else:
                            print(f"无法获取存储桶 {bucket} 的磁盘使用情况")
                    last_storage_check_time = current_time
            csv_import_round += 1
    except KeyboardInterrupt:
        print("\n程序被用户中断，正在清理资源...")
    finally:
        # Always close the client before the program exits.
        client.close()
        print("资源已清理，程序已退出")

if __name__ == "__main__":
    main()
