"""
运行碳披露、碳中和系统
"""
import os
import re
import sys
import json
import requests
import datetime
import threading
import traceback
from time import sleep
from queue import Queue

from django.conf import settings
from django.views.decorators.csrf import csrf_exempt

from common.custom.logger import Log
from common.base.base_respons import retJson
from common.custom.pdf_analyst import PdfAnalyst
from common.custom.pdf_processor import PdfProcessor
from common.custom.excel_processor import read_indicators_from_excel1
from common.custom.excel_processor import read_indicators_from_excel2

# Make the parent directory importable when this file is run as a script.
# (The `if True:` wrapper only groups the lines; it has no runtime effect.)
if True:
    current_path = os.path.abspath(os.path.dirname(__file__))
    superior_path = os.path.join(current_path, "..")
    sys.path.append(superior_path)

my_logger = Log()
THREADS_NUMBER = 2 # number of worker threads
writable = True # cooperative flag: is the shared log file free to write to?

# Indicator tables loaded once at import time from the bundled Excel files:
# system 1 = carbon-disclosure-quality keywords, system 2 = corporate
# carbon-neutrality development evaluation indicators.
system_1_indicators = read_indicators_from_excel1(os.path.join(settings.BASE_DIR, "data", "碳信息披露质量关键词.xls"))
system_2_indicators = read_indicators_from_excel2(os.path.join(settings.BASE_DIR, "data", "企业碳中和发展评价指标体系.xls") )

class Crawl_thread(threading.Thread):
    '''
    Worker thread that drains its own task queue of
    (filename, pno_start, pno_end) tuples, extracts each PDF's content and
    runs both indicator systems (1 and 2) over it.
    '''
    # Serializes appends to the shared log file across all worker threads.
    # Replaces the racy module-level `writable` busy-wait flag: two threads
    # could both observe writable == True before either set it to False.
    _log_lock = threading.Lock()

    def __init__(self, id, thread_id, pdf_queue_list):
        """
        id: integer index of this thread; selects its queue from pdf_queue_list.
        thread_id: human-readable thread name used in log messages.
        pdf_queue_list: one task Queue per thread, e.g. [queue_for_t1, queue_for_t2].
        """
        threading.Thread.__init__(self) # initialize the Thread base class
        self.id = id # thread index
        self.thread_id = thread_id # thread display name
        self.pdf_queue_list = pdf_queue_list # all threads' task queues
        self.queue = pdf_queue_list[id] # this thread's own task queue
        self.timeout = 60*60  # maximum wait time: 60 minutes
        # Directory holding the uploaded PDF files.
        self.pdf_base_path = "/code/media/uploads/pdfs_test"

    def run(self):
        '''
        Thread entry point: process tasks until this thread's queue is empty.
        :return: None
        '''
        my_logger.info(f'启动线程：{self.thread_id}')
        self.crawl_spider()
        my_logger.info(f'退出线程：{self.thread_id}')

    def crawl_spider(self):
        """Drain the task queue; for each PDF, extract content then run both systems."""
        while True:
            if self.queue.empty(): # queue exhausted -> this thread is done
                my_logger.info("队列为空，跳出")
                break

            filename, pno_start, pno_end = self.queue.get()
            # Bug fix: the path was previously built from a literal placeholder
            # string instead of the task's filename, so the wrong file was opened.
            filepath = os.path.join(self.pdf_base_path, filename).replace("\\", "/")

            # Extract PDF content into self.pdf.document_info.
            my_logger.info(f"当前工作的线程为：{self.thread_id}, 正在提取 {filename} PDF内容")
            try:
                self.pdf = PdfProcessor(filepath, media_root=settings.MEDIA_ROOT)
                self.pdf.run()
            except Exception as e:
                my_logger.error(f'错误: {str(e)}')
                # self.pdf may not exist (constructor raised) or may still be
                # the previous file's processor — guard the attribute access.
                my_logger.error(getattr(getattr(self, "pdf", None), "message", ""))
                # Without extracted content, analysing this file is pointless
                # (and would silently reuse the previous PDF's data).
                continue

            for systemId in (1, 2):
                my_logger.info(f"当前工作的线程为：{self.thread_id}, 正在采集：{filename} 系统{systemId}")
                try:
                    self.my_spider(filename, pno_start, pno_end, systemId)
                except Exception as e:
                    my_logger.error(f'错误: {str(e)}')

    def my_spider(self, filename, pno_start, pno_end, systemId):
        """
        Analyse one PDF with indicator system `systemId` (1 or 2) and append a
        success line to the shared log file. Errors are logged, not raised.
        """
        try:
            filepath = os.path.join(self.pdf_base_path, filename).replace("\\", "/")
            file = {
                "filepath": filepath,
                "pno_start": pno_start,
                "pno_end": pno_end,
            }
            excel_base_path = self.mkdir()
            indicators = system_1_indicators if systemId == 1 else system_2_indicators
            # w1/w2/w3 are the analyst's fixed scoring weights (project-defined).
            analysis_pdf = PdfAnalyst(file, indicators, systemId, w1=3.23, w2=3.37, w3=3.4, excel_base_path=excel_base_path, pdf=self.pdf)

            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            log_info = f'{now_time}: {filename} 系统{systemId} success!\n'
            my_logger.info(f"{log_info}")

            # Only one thread may append to the shared log file at a time.
            with self._log_lock:
                write_to_log(log_info)
        except Exception as e:
            my_logger.error(f'错误: {str(e)}')

    def mkdir(self):
        """Create media/downloads/<timestamp>/ for the result Excel files and return its path."""
        now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        excel_base_path = os.path.join(settings.MEDIA_ROOT, "downloads", now_time)
        # exist_ok avoids the exists()/makedirs() race when two threads hit
        # the same timestamped directory simultaneously.
        os.makedirs(excel_base_path, exist_ok=True)
        return excel_base_path


def get_pno_queue(filepath='pno.csv'):
    """
    Parse the pno CSV file into PDF tasks.

    Each data row is "PDF名称,start,end"; a header row whose first field is
    "PDF名称" is skipped, and a page number of "0" means "unspecified" and is
    mapped to the empty string.

    Returns:
        list of (filename, pno_start, pno_end) tuples. On any error an empty
        list is returned (previously this fell through and returned None,
        which crashed `len(pno_queue)` in main()).
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            text = f.read()

        pno_queue = [] # PDF task list
        lines = text.split('\n')
        my_logger.info(f"共有个 {len(lines)-1} 个文件需要处理")
        for line in lines:
            fields = line.split(',')

            # Skip malformed rows (wrong column count) and the header row.
            if len(fields) != 3:
                continue
            filename = fields[0].strip()
            if filename == "PDF名称":
                continue

            pno_start = fields[1].strip() if fields[1].strip() != "0" else ""
            pno_end = fields[2].strip() if fields[2].strip() != "0" else ""
            pno_queue.append((filename, pno_start, pno_end))

        return pno_queue

    except Exception as e:
        my_logger.error(f'程序异常结束运行, 错误: {str(e)}')
        my_logger.error(traceback.format_exc())
        return []

def split_list(input_list, number):
    """
    Split `input_list` into `number` task queues.

    Parameters:
        input_list: items to distribute (order is preserved).
        number: how many queues to produce.

    Returns:
        A list of exactly `number` queue.Queue objects. Bug fixed: the
        original returned the *raw input list* when len(input_list) < number,
        so callers that index into the result and call .empty()/.get() on
        Queue objects crashed. Now trailing queues are simply left empty.
    """
    length = len(input_list)

    if length < number:
        # One item per queue; queues beyond `length` stay empty.
        chunks = [input_list[i:i + 1] for i in range(number)]
    else:
        chunks = []
        for i in range(number):
            start = i * length // number
            # The last chunk absorbs any remainder from integer division.
            end = length if i == number - 1 else (i + 1) * length // number
            chunks.append(input_list[start:end])

    result = [] # one Queue per chunk
    for chunk in chunks:
        q = Queue()
        for item in chunk:
            q.put(item)
        result.append(q)

    return result

def write_to_log(log_info, log_filepath='/code/scripts/log_mutil.txt'):
    """Append `log_info` to the shared run log file at `log_filepath`."""
    with open(log_filepath, 'a', encoding='utf-8') as log_file:
        log_file.write(log_info)

def clear_logs(log_filepath='/code/scripts/log_mutil.txt'):
    """Empty the shared run log file at `log_filepath`."""
    # Mode 'w' already truncates on open; the explicit truncate is kept for
    # clarity of intent.
    with open(log_filepath, 'w', encoding='utf-8') as log_file:
        log_file.truncate(0)

def main():
    """
    Drive one full run: reset the log file, load the PDF task list, split it
    across THREADS_NUMBER worker threads, and block until all threads finish.
    """
    # Start-of-run marker.
    # NOTE(review): clear_logs() below immediately wipes this marker — this
    # mirrors the original ordering; confirm whether the marker should be
    # written *after* clearing.
    now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    write_to_log(f"\n\n{now_time}: START RUN \n")
    my_logger.info('程序开始运行')

    # Reset the shared log file.
    clear_logs()
    my_logger.info('清空日志文件')

    # Build the PDF task list. get_pno_queue may return None on a read
    # error; guard it so len() below cannot raise TypeError.
    pno_queue = get_pno_queue(filepath='/code/scripts/pno.csv') or []
    my_logger.info(f'pno_queue size: {len(pno_queue)}')
    write_to_log(f'pno_queue size: {len(pno_queue)}\n')

    # Distribute the tasks across the worker threads.
    pdf_queue_list = split_list(pno_queue, THREADS_NUMBER)

    # Create and start one crawl thread per queue.
    threads_list = []
    threads_id_list = [f"thread_{i+1}" for i in range(THREADS_NUMBER)]

    for id, thread_id in enumerate(threads_id_list):
        thread = Crawl_thread(id, thread_id, pdf_queue_list)
        thread.start()
        threads_list.append(thread)
        sleep(3) # stagger thread start times

    # Wait for every worker to finish.
    for t in threads_list:
        t.join()

    my_logger.info('程序结束运行')
    write_to_log('程序结束运行\n')

@csrf_exempt
def my_run_2(request):
    """Django view: run the full analysis pipeline and report the outcome as JSON."""
    try:
        main()
        return retJson(code=1, msg="success")
    except Exception as err:
        # Log the failure with a full traceback, then surface it to the caller.
        my_logger.error(f'程序异常结束运行, 错误: {str(err)}')
        my_logger.error(traceback.format_exc())
        return retJson(code=0, msg=str(err))