# -*- coding: utf-8 -*-

import asyncio
import csv
import functools
import inspect
import os
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor

import openpyxl
import pandas as pd
import pdfplumber
import requests

class ThreadPoolCoroutineDecorator:
    """Decorator that fans a list of tasks out across a thread pool.

    The decorated function is called once per task.  Tasks are split into
    one chunk per worker thread and each chunk runs in its own thread.
    Both plain functions and ``async def`` coroutine functions are
    supported; coroutine functions are driven by a private event loop in
    each worker thread (via ``asyncio.run``).

    The wrapped callable has the signature ``wrapper(tasks, *args,
    **kwargs)`` and returns a list of per-chunk result lists.  A task that
    raises is recorded in-band as ``(task, str(exception))`` instead of
    aborting its chunk.
    """

    def __init__(self, max_workers=None):
        # Default mirrors ThreadPoolExecutor's classic heuristic for
        # I/O-bound work: 5 threads per CPU core.
        self.max_workers = max_workers or (os.cpu_count() or 1) * 5

    def __call__(self, func):
        @functools.wraps(func)
        def wrapper(tasks, *args, **kwargs):
            task_chunks = self._chunk_tasks(tasks, self.max_workers)
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                futures = [
                    executor.submit(self._run_task_chunk, func, chunk, args, kwargs)
                    for chunk in task_chunks
                ]
            # The executor's __exit__ has already joined every worker, so
            # all futures are done and .result() returns immediately.
            return [future.result() for future in futures]

        return wrapper

    @staticmethod
    def _chunk_tasks(tasks, num_chunks):
        # Ceiling division so no task is dropped; trailing chunks may be
        # empty when there are fewer tasks than workers.
        chunk_size = len(tasks) // num_chunks + (len(tasks) % num_chunks > 0)
        return [tasks[i * chunk_size:(i + 1) * chunk_size] for i in range(num_chunks)]

    def _run_task_chunk(self, func, task_chunk, args, kwargs):
        # Runs inside a worker thread: pick the sync or async driver.
        if inspect.iscoroutinefunction(func):
            return asyncio.run(self._run_async_tasks(func, task_chunk, args, kwargs))
        return self._run_sync_tasks(func, task_chunk, args, kwargs)

    @staticmethod
    def _run_sync_tasks(func, task_chunk, args, kwargs):
        results = []
        for task in task_chunk:
            try:
                results.append(func(task, *args, **kwargs))
            except Exception as e:
                # Record the failure in-band instead of killing the chunk.
                results.append((task, str(e)))
        return results

    @staticmethod
    async def _run_async_tasks(func, task_chunk, args, kwargs):
        # BUG FIX: this must be a coroutine — asyncio.run() requires one —
        # and each call to the coroutine function must be awaited.
        # Previously this was a plain function that created coroutine
        # objects without ever running them.
        results = []
        for task in task_chunk:
            try:
                results.append(await func(task, *args, **kwargs))
            except Exception as e:
                results.append((task, str(e)))
        return results

# Pool of browser User-Agent strings; one is chosen at random per request
# so the downloads look less like a single automated client.
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
]

def download_report(file_name, download_url, save_path):
    """Download a report PDF to ``save_path``/``file_name``.pdf.

    Retries the HTTP request up to ``max_retry`` extra times on a non-200
    status.  All errors are printed and swallowed (best-effort), so callers
    never see an exception from this function.
    """
    max_retry = 5
    try:
        for retry_num in range(max_retry + 1):
            # BUG FIX: re-issue the request on every retry — previously a
            # single response was fetched once and its status re-checked
            # max_retry times, so "retrying" never actually retried.
            r = requests.get(
                download_url,
                headers={"User-Agent": random.choice(user_agents)},
                timeout=30,  # avoid hanging forever on a stalled server
            )
            if r.status_code == 200:
                file_path = save_path + "/" + f"{file_name}.pdf"
                with open(file_path, "wb") as f:
                    f.write(r.content)
                print(f"txt文件下载成功！{file_name}")
                break
            print("txt文件下载失败！")
            # BUG FIX: the f-prefix was missing, so "{retry_num}" was
            # printed literally instead of the retry count.
            print(f"重试{retry_num}次！")
    except Exception as e:
        print("txt文件下载失败！")
        print(e)  # surface the reason instead of silently swallowing it

def pdf_to_txt(pdf_path, text_path):
    """Extract all page text from the PDF at ``pdf_path`` into ``text_path``.

    Pages with no extractable text are skipped.  The output file is written
    as UTF-8.  Any failure is printed and swallowed (best-effort).
    """
    try:
        with pdfplumber.open(pdf_path) as pdf, \
                open(text_path, "w", encoding="utf-8") as f:
            for page in pdf.pages:
                text = page.extract_text()
                if text:  # extract_text() returns None for image-only pages
                    f.write(text)
        print(f"Text extracted to: {text_path}")
        print("pdf文件下载成功！")
    except Exception as e:
        print("pdf文件下载失败！")
        print(e)  # surface the reason instead of silently swallowing it

def get_report(df):
    """Download every report listed in ``df`` and convert each to text.

    Each row supplies the title (col 3), stock code (col 1), year (col 2)
    and download URL (col 5).  Work is fanned out across 6 worker threads;
    a record that keeps failing after 5 attempts is appended to a CSV log.
    """

    @ThreadPoolCoroutineDecorator(6)
    def get_report_to_txt(task):
        i, txt_save_path, pdf_save_path, title, download_url = task
        for j in range(5):
            try:
                # Strip the search-result highlight markers from the title.
                title = title.replace("<em>", "").replace("</em>", "")
                download_report(title, download_url, pdf_save_path)
                pdf_path = pdf_save_path + "/" + f"{title}.pdf"
                text_path = txt_save_path + "/" + f"{title}.txt"
                pdf_to_txt(pdf_path, text_path)
                print(f"\n第{i}条记录下载成功")
                # BUG FIX: stop retrying once the record succeeds —
                # `continue` here re-downloaded the same file 5 times.
                break
            except Exception as e:
                print(f"\n第{i}条记录下载失败")
                print(e)
                print(f"重试{j+1}次运行")
                # BUG FIX: newline must be "" for files used with the csv
                # module ("," is an invalid value and raises ValueError),
                # and mode "a" appends so earlier failures aren't clobbered.
                with open("/home/ubuntu/code/git/subject-word-extraction/data/logs/logs.csv",
                          mode="a", newline="") as file:
                    writer = csv.writer(file)
                    writer.writerow([i, txt_save_path, pdf_save_path, title, download_url])

    txt_save_path = "/home/ubuntu/code/git/subject-word-extraction/data/download_txt"
    pdf_save_path = "/home/ubuntu/code/git/subject-word-extraction/data/download_pdf"
    res = []
    for i in range(df.shape[0]):
        title = df.iloc[i, 3]
        stkcode = str(df.iloc[i, 1]).zfill(6)  # pad stock code to 6 digits
        year = str(df.iloc[i, 2])
        download_url = df.iloc[i, 5]
        res.append((i, txt_save_path, pdf_save_path,
                    "{}_{}_{}".format(year, stkcode, title), download_url))

    get_report_to_txt(res)

def new_get_report(df):
    """Sequential (single-threaded) variant of ``get_report``.

    Downloads each report with a random 1-2s pause between records, then
    extracts its text.  Column layout of ``df`` matches ``get_report``.
    """
    txt_save_path = "/home/ubuntu/code/git/subject-word-extraction/data/download_txt"
    pdf_save_path = "/home/ubuntu/code/git/subject-word-extraction/data/download_pdf"
    for i in range(df.shape[0]):
        title = df.iloc[i, 3]
        stkcode = str(df.iloc[i, 1]).zfill(6)  # pad stock code to 6 digits
        year = str(df.iloc[i, 2])
        title = "{}_{}_{}".format(year, stkcode, title)
        download_url = df.iloc[i, 5]
        # BUG FIX: the PDF must be saved under pdf_save_path — it was
        # written to txt_save_path but read back from pdf_save_path below,
        # so pdf_to_txt could never find the file.
        download_report(title, download_url, pdf_save_path)
        time.sleep(random.uniform(1, 2))  # polite crawl delay
        pdf_path = pdf_save_path + "/" + f"{title}.pdf"
        text_path = txt_save_path + "/" + f"{title}.txt"
        pdf_to_txt(pdf_path, text_path)



if __name__ == "__main__":
    # Load the crawled report index (stock code, year, title, download URL
    # columns) and download/convert every report it lists, threaded.
    df = pd.read_csv("/home/ubuntu/code/git/subject-word-extraction/data/output/stock_url-2_1737886019.3975737.csv")
    get_report(df)
    # new_get_report(df)
    
    