# -*- coding:utf-8 -*-

import requests
import pandas as pd
import io
import PyPDF2
import os
import time
import random

from MySQLDatabase import MySQLDatabase

def get_ua():
    """Return a randomly chosen browser User-Agent string.

    Rotating the User-Agent between requests makes the crawler look
    less like a single automated client.

    Returns:
        str: one of a fixed pool of desktop browser User-Agent values.
    """
    candidate_agents = (
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0;) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    )
    return random.choice(candidate_agents)

def write_text_from_url(pdf_url, file_name, save_path):
    """Download a PDF from a URL, extract its text and save it to disk.

    Retries the whole download+parse up to ``max_retry`` times, sleeping
    5 seconds between attempts.

    Args:
        pdf_url: direct URL of the PDF document.
        file_name: base name (without extension) for the output file.
        save_path: directory in which the output file is written.

    Returns:
        bool: True if the text file was written successfully, False after
        all retries are exhausted.

    Note:
        The output is plain UTF-8 text but is saved with a ``.zip``
        extension — the caller (``download_report``) depends on this
        naming, so it is kept. TODO: confirm whether ``.zip`` is
        intentional or should be ``.txt``.
    """
    max_retry = 5
    for attempt in range(max_retry):
        try:
            headers = {
                'User-Agent': get_ua(),  # rotate UA per attempt
                'Accept-Language': 'en-US,en;q=0.9',
            }
            response = requests.get(pdf_url, headers=headers, timeout=10)
            if response.status_code == 200:
                # Parse the in-memory PDF; a parse/extract failure is
                # caught by the outer handler and triggers a fresh
                # download on the next attempt (the payload may have
                # been corrupted in transit).
                reader = PyPDF2.PdfReader(io.BytesIO(response.content))
                file_path = save_path + "/" + f"{file_name}.zip"
                with open(file_path, "w", encoding="utf-8") as f:
                    for page in reader.pages:
                        text = page.extract_text()
                        if text:  # skip pages with no extractable text
                            f.write(text)
                return True
            else:
                print(f"无法下载PDF文件，HTTP状态码：{response.status_code}")
        except Exception as e:
            # Original swallowed the exception detail; surface it so
            # failures are diagnosable.
            print(f"重试{attempt+1}次: {e}")

        time.sleep(5)

    # All retries exhausted — report failure explicitly instead of the
    # original implicit None.
    return False
    

def download_report(file_path):
    """Download pending annual reports and record them in the database.

    Queries ``listed_company`` for 2024+ annual-report announcements not
    yet downloaded, fetches each PDF's text via ``write_text_from_url``,
    marks successful rows as downloaded, and batch-inserts file metadata
    into ``listed_company_report``.

    Args:
        file_path: directory where extracted report files are saved.

    Raises:
        Exception: any underlying error, re-raised with a
        ``download_report error:`` prefix.
    """
    db = None  # so `finally` is safe even if the constructor raises
    try:
        db = MySQLDatabase()
        db.connect()
        # Only announcements at/after 2024-01-01 00:00:00 (ms epoch).
        timestamp = 1704038400000
        # Pending annual reports, excluding abstract ("摘要") documents.
        query_sql = f'''
        SELECT * FROM listed_company where download_success = 0 AND announcement_title LIKE "%年度报告%" and announcement_title NOT LIKE "%摘要%" and announcement_time >= {timestamp};
        '''

        query_result, columns = db.query_data(query_sql)
        query_df = pd.DataFrame(query_result, columns=columns)

        query_df["download_url"] = query_df["base_url"] + query_df["subset_url"]

        insert_sql = '''
        INSERT INTO listed_company_report (id, company_code, company_name, announcement_time, announcement_title, save_path, mime_type, size)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        '''

        insert_data = []
        total = query_df.shape[0]
        for i in range(total):
            print(f"{i+1}/{total}")  # fixed: was a literal backslash
            download_url = query_df["download_url"].values[i]
            announcement_title = query_df["announcement_title"].values[i]
            query_id = query_df["id"].values[i]
            # Announce BEFORE downloading (original printed this after).
            print(f"\n第 {i+1} 项 准备下载{announcement_title}")

            is_download_successful = write_text_from_url(download_url, announcement_title, file_path)

            if not is_download_successful:
                # Leave download_success = 0 so the row is retried on the
                # next run; skip metadata recording — the local file may
                # not exist and os.path.getsize would raise.
                continue

            # Mark the source row as downloaded.
            # NOTE(review): string-formatted SQL kept because
            # db.update_data appears to take a single SQL string —
            # prefer parameterized queries if the API supports them.
            update_sql = '''
            UPDATE listed_company SET download_success = %s WHERE id = '%s'
            ''' % (1, query_id)
            rows_affected = db.update_data(update_sql)
            print(f"更新成功，影响行数: {rows_affected}")

            company_code = query_df["company_code"].values[i]
            company_name = query_df["company_name"].values[i]
            announcement_time = query_df["announcement_time"].values[i]

            save_path = file_path + "/" + f"{announcement_title}.zip"
            mime_type = "application/zip"
            size = os.path.getsize(save_path)

            insert_data.append((query_id, company_code, company_name, announcement_time, f"{announcement_title}.zip", save_path, mime_type, size))

            # Flush in batches of 5 (original fired on i % 5 == 0,
            # including a batch of one at i == 0).
            if len(insert_data) >= 5:
                db._insert_data(insert_sql, insert_data)
                insert_data = []

        # Flush the final partial batch (original silently dropped it).
        if insert_data:
            db._insert_data(insert_sql, insert_data)

        print("数据下载完成！")

    except Exception as e:
        # Preserve the original traceback via explicit chaining.
        raise Exception(f'download_report error: {str(e)}') from e
    finally:
        if db is not None:
            db.close()
    
if __name__ == "__main__":
    file_path = "/home/ubuntu/data/annual_report"
    download_report(file_path)