"""
运行碳披露、碳中和系统
"""
import os
import re
import sys
import ctypes
import datetime
import traceback
from time import sleep
import multiprocessing
import time

from django.conf import settings
from django.views.decorators.csrf import csrf_exempt

from common.custom.logger import Log
from common.base.base_respons import retJson
from common.custom.pdf_analyst import PdfAnalyst
from common.custom.pdf_processor import PdfProcessor
from common.custom.excel_processor import read_indicators_from_excel1
from common.custom.excel_processor import read_indicators_from_excel2

from common.custom.keywords_processor import split_keywords_with_comma

# Make the parent directory importable when this file is run as a script.
# (The original wrapped these statements in a no-op ``if True:`` block,
# which added nothing; they now run at module level directly.)
current_path = os.path.abspath(os.path.dirname(__file__))
superior_path = os.path.join(current_path, "..")
sys.path.append(superior_path)

my_logger = Log()   # module-level logger shared by the functions below
THREADS_NUMBER = 2  # thread count (not referenced elsewhere in this file)
PROCESS_NUMBER = 3  # number of worker processes spawned by main()

def mkdir():
    """Create a timestamped folder under media/downloads/ for result Excels.

    Returns:
        str: absolute path of the created (or already existing) directory.
    """
    now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    excel_base_path = os.path.join(settings.MEDIA_ROOT, "downloads", now_time)
    # exist_ok avoids the check-then-create race when several worker
    # processes call mkdir() within the same second.
    os.makedirs(excel_base_path, exist_ok=True)
    return excel_base_path

def get_pno_list(pno_info):
    """Parse pno.csv content into a list of PDF page-range tasks.

    Each data row is expected to be ``filename,pno_start,pno_end``; rows with
    a different field count and the header row ("PDF名称", ...) are skipped.
    A page number of "0" means "unbounded" and is normalised to "".

    Args:
        pno_info (str): raw CSV text.

    Returns:
        list[tuple[str, str, str]]: [(filename, pno_start, pno_end), ...].
        On a parsing error an empty list is returned (the original returned
        None implicitly, which made main() crash on ``len(None)``).
    """
    try:
        pno_queue = []  # PDF task queue
        for line in pno_info.split('\n'):
            fields = line.split(',')

            if len(fields) != 3:
                continue

            filename = fields[0].strip()
            if filename == "PDF名称":  # skip the CSV header row
                continue

            pno_start = fields[1].strip() if fields[1].strip() != "0" else ""
            pno_end = fields[2].strip() if fields[2].strip() != "0" else ""
            pno_queue.append((filename, pno_start, pno_end))

        return pno_queue

    except Exception as e:
        my_logger.error(f'程序异常结束运行, 错误: {str(e)}')
        my_logger.error(traceback.format_exc())
        write_to_log(f'程序异常结束运行, 错误: {str(e)}', writable=True)
        write_to_log(traceback.format_exc(), writable=True)
        return []  # fail safe: callers iterate / len() the result
    
def write_to_log(log_info, writable, log_filepath=os.path.join(settings.BASE_DIR, "scripts", "log_mutil.txt")):
    """Append ``log_info`` to the shared log file.

    Args:
        log_info (str): text to append.
        writable (bool/int): legacy "lock" flag. NOTE(review): this was never
            a real cross-process lock -- callers pass ``writable.value`` (a
            plain int copy), so mutating it locally could not synchronise
            anything, and a falsy value made the original spin in ``sleep(1)``
            forever. A falsy value now simply skips the write instead of
            hanging the process.
        log_filepath (str): target log file (default evaluated at import time).
    """
    if writable:
        with open(log_filepath, 'a', encoding='utf-8') as f:
            f.write(log_info)

def clear_logs(log_filepath=os.path.join(settings.BASE_DIR, "scripts", "log_mutil.txt")):
    """Reset the log file: empty it if it exists, otherwise create it seeded
    with "###" (same behavior as before).

    Note: mode 'w' already truncates, so the original's explicit
    ``f.truncate(0)`` in the existing-file branch was redundant.
    """
    seed = "" if os.path.exists(log_filepath) else "###"
    with open(log_filepath, 'w', encoding='utf-8') as f:
        f.write(seed)

def split_list(input_list, number):
    """Distribute ``input_list`` over at most ``number`` multiprocessing queues.

    Args:
        input_list (list): items to distribute.
        number (int): desired number of chunks (one per worker process).

    Returns:
        list[multiprocessing.Queue]: one queue per chunk. When there are
        fewer items than ``number``, one single-item queue per item is
        returned, so no worker ever receives an empty queue. An empty input
        yields an empty list.

    Bug fixed: the original's short-list branch had ``return result`` INSIDE
    the for loop, so only the first item was ever returned; an empty input
    fell through to an IndexError on ``output_list[i]``.
    """
    if not input_list:
        return []

    length = len(input_list)
    if length < number:
        # Fewer items than requested chunks: one item per queue.
        chunks = [[item] for item in input_list]
    else:
        chunks = []
        for i in range(number):
            start = i * length // number
            # The last chunk absorbs any remainder.
            end = length if i == number - 1 else (i + 1) * length // number
            chunks.append(input_list[start:end])

    result = []
    for chunk in chunks:
        queue = multiprocessing.Queue()
        for item in chunk:
            queue.put(item)
        result.append(queue)

    return result

def process_theme_word(theme_word):
    """Normalise a raw theme-word string into a list of keywords.

    Args:
        theme_word (str): raw theme-word text (delimiters normalised to
            commas by ``split_keywords_with_comma``).

    Returns:
        List[str]: individual theme words.
    """
    normalized = split_keywords_with_comma(theme_word)
    # Strip stray line breaks before splitting on the comma delimiter.
    for unwanted in ('\r', '\n'):
        normalized = normalized.replace(unwanted, '')
    return normalized.split(',')

def Mutil_process(pno_queue, writable, theme_word_global):
    """Worker-process entry point: drain ``pno_queue`` and analyse each PDF.

    For every (filename, pno_start, pno_end) task this worker extracts the
    PDF content, then runs analysis system 1 (碳信息披露质量) and system 2
    (企业碳中和发展) on it, writing each result into a fresh timestamped
    Excel folder and appending progress to the shared log file.

    Args:
        pno_queue (multiprocessing.Queue): tasks of (filename, pno_start, pno_end).
        writable (multiprocessing.Value): legacy log "lock" flag; only .value is read.
        theme_word_global: Manager().Value proxy carrying the raw theme-word string.
    """
    process_logger = Log()

    theme_words = process_theme_word(theme_word_global.value)

    pdf_base_path = os.path.join(settings.BASE_DIR, "media", "uploads", "pdfs_test")
    system_1_indicators = read_indicators_from_excel1(os.path.join(settings.BASE_DIR, "data", "碳信息披露质量关键词.xls"))
    system_2_indicators = read_indicators_from_excel2(os.path.join(settings.BASE_DIR, "data", "企业碳中和发展评价指标体系.xls"))

    while True:
        if pno_queue.empty():
            process_logger.info("队列为空，跳出")
            break

        filename, pno_start, pno_end = pno_queue.get()
        # Bug fixed: the original joined a hard-coded placeholder string
        # instead of the task's filename, so every task resolved to the same
        # path and the log lines never named the file being processed.
        filepath = os.path.join(pdf_base_path, f"{filename}").replace("\\", "/")

        process_logger.info(f"正在提取 {filename} PDF内容")
        try:
            pdf = PdfProcessor(filepath, media_root=settings.MEDIA_ROOT)
            pdf.run()
        except Exception as e:
            # Bug fixed: the original logged ``pdf.message`` here, which
            # raised NameError when the constructor itself failed, and then
            # still ran both analysis passes on a missing/stale ``pdf``.
            # Log the failure and skip to the next task instead.
            process_logger.error(f'错误: {str(e)}')
            process_logger.error(traceback.format_exc())
            write_to_log(f'错误: {str(e)}', writable.value)
            write_to_log(traceback.format_exc(), writable.value)
            continue

        # The two analysis systems share identical plumbing; only the
        # indicator set and the systemId differ.
        for system_id, indicators in ((1, system_1_indicators), (2, system_2_indicators)):
            process_logger.info(f"正在采集 {filename} 系统{system_id}")
            try:
                file = {
                    "filepath": filepath,
                    "pno_start": pno_start,
                    "pno_end": pno_end,
                }
                excel_base_path = mkdir()
                # PdfAnalyst performs the analysis and writes the Excel
                # output as a side effect of construction.
                PdfAnalyst(file, indicators, systemId=system_id,
                           w1=3.23, w2=3.37, w3=3.4,
                           excel_base_path=excel_base_path,
                           keywords_normal=theme_words, pdf=pdf)
                now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                log_info = f'{now_time}: {filename} 系统{system_id} success!\n'
                process_logger.info(f"{log_info}")
                write_to_log(log_info, writable.value)
            except Exception as e:
                process_logger.error(f'错误: {str(e)}')
                process_logger.error(traceback.format_exc())
                write_to_log(f'程序异常结束运行, 错误: {str(e)}', writable.value)
                write_to_log(traceback.format_exc(), writable.value)


def main(pno_info, theme_word):
    """Orchestrate a multi-process analysis run.

    Args:
        pno_info (str): raw pno.csv text (filename, start page, end page rows).
        theme_word (str): raw theme-word string shared with every worker.
    """
    writeable = multiprocessing.Value("i", 1)  # legacy log "lock" flag

    # Theme words are shared with the workers through a manager proxy.
    theme_word_global = multiprocessing.Manager().Value(ctypes.c_char_p, theme_word)

    # Bug fixed: the original wrote the START RUN line first and then called
    # clear_logs(), which immediately erased it. Clear the file first.
    clear_logs()
    my_logger.info('清空日志文件')

    now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    write_to_log(f"\n\n{now_time}: START RUN \n", writeable.value)
    my_logger.info('程序开始运行')
    begin_time = time.time()

    # Build the PDF task queues, one per worker process.
    pno_list = get_pno_list(pno_info)
    splited_queue = split_list(pno_list, PROCESS_NUMBER)

    my_logger.info(f"共有 {len(pno_list)} 个文件需要处理")
    write_to_log(f'共有 {len(pno_list)} 个文件需要处理\n', writeable.value)

    process_list = []
    # split_list may return fewer queues than PROCESS_NUMBER when there are
    # fewer files than workers.
    for i in range(min(PROCESS_NUMBER, len(splited_queue))):
        process = multiprocessing.Process(
            target=Mutil_process,
            args=(splited_queue[i], writeable, theme_word_global),
        )
        process_list.append(process)
        process.start()

    # Wait for every worker to finish before logging the summary.
    for process in process_list:
        process.join()

    my_logger.info('程序结束运行')
    end_time = time.time()
    write_to_log('程序结束运行\n', writeable.value)
    my_logger.info(f"程序执行用时:{end_time - begin_time}")
    write_to_log(f"程序执行用时:{(end_time - begin_time)/60}分钟", writable=writeable.value)

@csrf_exempt
def my_run_for_page_process(request):
    """Django view: kick off the multi-process PDF analysis run.

    POST params:
        pno_info (str): pno.csv content; required, must be non-empty.
        theme_word (str): theme-word string passed to every worker.

    Returns:
        JSON response via retJson: code=1 on success, code=0 with an error
        message otherwise.
    """
    # Guard clause instead of the original's ``if POST: pass / else: return``.
    if request.method != 'POST':
        return retJson(code=0, msg="使用POST请求")
    try:
        pno_info = request.POST.get('pno_info')
        theme_word = request.POST.get('theme_word')

        # Bug fixed: the original only rejected "", so a missing parameter
        # (None) slipped through and crashed later inside get_pno_list().
        if not pno_info:
            return retJson(code=0, msg="pno_info不能为空")

        main(pno_info, theme_word)
        return retJson(code=1, msg="success")
    except Exception as e:
        my_logger.error(f'程序异常结束运行, 错误: {str(e)}')
        my_logger.error(traceback.format_exc())
        return retJson(code=0, msg=str(e))