import concurrent
import threading
import requests
from concurrent.futures import ThreadPoolExecutor
import time
import json, os

# Resolve the directory containing this script; its parent directory is
# used later as the root under which the essay folder ("敏感词") lives.
current_file_path = os.path.abspath(__file__)
current_dir = os.path.dirname(current_file_path)
parent_dir = os.path.dirname(current_dir)
# Last article id returned by the server; written by fetch_url from
# worker threads without a lock — NOTE(review): confirm single-writer
# assumption or guard with threading.Lock if the value matters.
article_id = 0

'''
Concurrent API load-test utilities (API 并发测试开发).
'''


def chinese_to_digits(chinese_digit):
    """Translate a Chinese grade label into its numeric grade (1-12).

    Labels run from primary school ("一".."六" -> 1..6) through junior
    high ("初一".."初三" -> 7..9) to senior high ("高一".."高三" -> 10..12).
    An unrecognized label yields an empty string.
    """
    labels = [
        "一", "二", "三", "四", "五", "六",
        "初一", "初二", "初三",
        "高一", "高二", "高三",
    ]
    lookup = dict(zip(labels, range(1, len(labels) + 1)))
    return lookup.get(chinese_digit, "")


def fetch_url(info):
    """POST *info* as a JSON payload to the remark endpoint.

    Args:
        info: dict payload (title/content/grade/userName) to serialize.

    Returns:
        A short per-thread status string describing the outcome
        (success, transport error, or unparseable response), suitable
        for printing by the caller.

    Side effects:
        Updates the module-level ``article_id`` with the id returned by
        the server. NOTE(review): this write is not synchronized across
        worker threads — confirm a lock is unnecessary.
    """
    # The original placed this docstring *after* the `global` statement,
    # which made it a discarded expression rather than a docstring.
    global article_id
    url = "http://10.43.125.170:9840/article/remark"
    payload = json.dumps(info)
    headers = {
        'Content-Type': 'application/json'
    }
    try:
        response = requests.request("POST", url, headers=headers, data=payload)
    except requests.RequestException as e:
        return f"Thread {threading.current_thread().name}: Error {str(e)}"
    print(response.text)
    # Guard the parse: a gateway error page or an error-shaped body used
    # to crash the worker with an unhandled exception here.
    try:
        data = json.loads(response.text)
        article_id = data['data']['articleId']
    except (ValueError, KeyError, TypeError) as e:
        return f"Thread {threading.current_thread().name}: Bad response {str(e)}"
    print("Article ID:", article_id)
    # Return a string on success too; previously the success path
    # returned None, so the caller printed "None" for every good request.
    return f"Thread {threading.current_thread().name}: OK {response.status_code}"


def analyze_test(request_infos=None, max_workers=2, repetitions=3):
    """Fire each request payload at the API *repetitions* times concurrently.

    Args:
        request_infos: list of payload dicts for fetch_url; defaults to
            an empty list (no requests). The original used a mutable
            default argument (`=[]`), which is shared across calls —
            replaced with the None-sentinel idiom.
        max_workers: thread-pool size.
        repetitions: how many times each payload is submitted.

    Prints each worker's result and the total elapsed wall-clock time.
    """
    infos = [] if request_infos is None else request_infos
    start_time = time.time()

    # ThreadPoolExecutor manages the worker threads.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit every payload `repetitions` times.
        futures = [executor.submit(fetch_url, info)
                   for info in infos
                   for _ in range(repetitions)]

        # Print results as the workers finish.
        for future in concurrent.futures.as_completed(futures):
            print(future.result())

    end_time = time.time()
    print(f"\nAll requests finished in {end_time - start_time:.2f} seconds.")


# 从文件中读取info信息
def get_info_file(path, file_name=""):
    name = file_name
    if len(name) == 0:
        name, current_file_extension = os.path.splitext(os.path.basename(path))

    with open(path, 'r', encoding='utf-8') as file:
        first_line = file.readline()

    with open(path, 'r', encoding='utf-8') as file:
        lines = file.readlines()  # 读取所有行到列表中
        if lines:  # 确保文件不为空
            all_but_first = lines[1:]  # 从第二行开始切片，跳过第一行
            # 合并所有行成为一个字符串，或者直接遍历列表处理每一行
            rest_of_file_content = '\n'.join(all_but_first)

    chinese_dig = name.split("年级")[0] if len(name.split("年级")) > 1 else "三"
    grade = chinese_to_digits(chinese_dig)
    info = {
        "title": first_line,
        "content": rest_of_file_content,
        "grade": 10,
        "userName": "小明"
    }
    return info


# 整个文件夹的作文分析，每个文件分析1次
# Analyze every essay in the folder, each file `repetitions` times.
def analyze_folder(repetitions=1):
    """Analyze every essay file in the sensitive-words folder.

    Hidden files (names starting with ".") are skipped. All payloads are
    collected first, then sent sequentially (max_workers=1), each
    *repetitions* times.

    Args:
        repetitions: how many times each file's payload is submitted.
    """
    # os.path.join instead of the original `parent_dir + "./敏感词"`,
    # which produced a path like ".../parent./敏感词" (stray dot) and
    # pointed at a directory that does not exist.
    root_dir = os.path.join(parent_dir, "敏感词")
    file_names = os.listdir(root_dir)
    all_infos = []
    for name in file_names:
        if name.startswith("."):
            # Skip hidden files (e.g. .DS_Store).
            continue
        path = os.path.join(root_dir, name)
        print("==" * 20)
        print("即将分析文件: {}".format(path))
        print("==" * 20)
        info = get_info_file(path=path, file_name=name)
        all_infos.append(info)
    analyze_test(request_infos=all_infos, max_workers=1, repetitions=repetitions)


# 单个文件分析repetitions次
def analyze_one_file(path, file_name="", repetitions=3):
    info = get_info_file(path, file_name)
    analyze_test(request_infos=[info], repetitions=repetitions)
    # print(info)


if __name__ == "__main__":
    # Manual entry point: run one file `repetitions` times, or the whole
    # folder. Both calls are commented out, so running the script as-is
    # only defines `path` and exits.
    path = "三年级-10-敏感词.txt"
    # analyze_one_file(path, repetitions=1)
    #analyze_folder()
