import json
import os
import urllib.parse
import urllib.request
from typing import Iterable

import mysql.connector
import scrapy
from scrapy import Request


class NewAnnouncementsSpider(scrapy.Spider):
    """Crawl announcement listings from cninfo.com.cn, download each
    announcement PDF into ``pdfs/`` and record (adjunctUrl, local path)
    rows in a MySQL ``announcements`` table.
    """

    name = "new_announcements"
    # BUG FIX: the original config had 'host': 3306 — that is the port, not a
    # hostname, and makes mysql.connector fail to reach the server. Host and
    # port are now separate keys. NOTE(review): confirm 'localhost' matches
    # the actual deployment; credentials should ideally come from settings,
    # not source code.
    db_config = {
        'user': 'root',
        'password': '123456',
        'host': 'localhost',
        'port': 3306,
        'database': '巨潮资讯'
    }

    def __init__(self, *args, **kwargs):
        """Open the MySQL connection and ensure the target table exists."""
        super().__init__(*args, **kwargs)
        self.db = mysql.connector.connect(**self.db_config)
        self.cursor = self.db.cursor()
        self.create_table()

    def create_table(self):
        """Create the ``announcements`` table if it is not already present."""
        create_table_query = """
        CREATE TABLE IF NOT EXISTS announcements (
            id INT AUTO_INCREMENT PRIMARY KEY,
            adjunctUrl VARCHAR(255) NOT NULL,
            pdf_path VARCHAR(255) NOT NULL
        )
        """
        self.cursor.execute(create_table_query)
        self.db.commit()

    def start_requests(self):
        """POST the history-announcement query to cninfo and hand the JSON
        response to :meth:`parse`.
        """
        url = "https://www.cninfo.com.cn/new/hisAnnouncement/query"
        headers = {
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
        }
        data = {
            "pageNum": "1",
            "pageSize": "30",
            "column": "szse",
            "tabName": "fulltext",
            "plate": "",
            "stock": "",
            "searchkey": "",
            "secid": "",
            "category": "",
            "trade": "",
            "seDate": "2024-10-16~2025-04-17",
            "sortName": "",
            "sortType": "",
            "isHLtitle": "true"
        }
        # urlencode percent-escapes reserved characters ('&', '=', '~', CJK
        # text, ...) — the original manual '&'.join produced a malformed body
        # as soon as any value needed escaping.
        yield Request(url=url, method='POST', headers=headers,
                      body=urllib.parse.urlencode(data),
                      callback=self.parse)

    def parse(self, response):
        """Walk the JSON announcement list and schedule one detail-page
        request per announcement, carrying the attachment URL and title
        through ``meta`` to :meth:`download_pdf`.
        """
        for every in json.loads(response.text)["announcements"]:
            adjunctUrl = every["adjunctUrl"]
            announcementTitle = every["announcementTitle"]
            # adjunctUrl looks like "finalpage/<date>/<id>.PDF"; the middle
            # segment doubles as the announcementTime query parameter.
            url = f"https://www.cninfo.com.cn/new/disclosure/detail?stockCode={every['secCode']}&announcementId={every['announcementId']}&orgId={every['orgId']}&announcementTime={every['adjunctUrl'].split('/')[1]}"
            yield Request(url, meta={'adjunctUrl': adjunctUrl, 'announcementTitle': announcementTitle},
                          callback=self.download_pdf)

    @staticmethod
    def _safe_filename(title):
        """Return *title* stripped of the <em> highlight tags cninfo embeds
        (isHLtitle=true) and of characters that are illegal in file names on
        Windows/Unix — the raw title previously crashed urlretrieve.
        """
        title = title.replace('<em>', '').replace('</em>', '')
        return ''.join(c for c in title if c not in '\\/:*?"<>|').strip()

    def download_pdf(self, response):
        """Fetch the announcement PDF, store it under ``pdfs/`` and insert a
        row linking the remote attachment URL to the local relative path.
        """
        adjunctUrl = response.meta.get('adjunctUrl')
        announcementTitle = response.meta.get('announcementTitle')
        PDF_url = f"https://static.cninfo.com.cn/{adjunctUrl}"
        pdf_folder = "pdfs"
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard.
        os.makedirs(pdf_folder, exist_ok=True)
        file_name = os.path.join(pdf_folder, f"{self._safe_filename(announcementTitle)}.pdf")
        try:
            urllib.request.urlretrieve(PDF_url, file_name)
            relative_path = os.path.relpath(file_name)
            # Parameterized query — values are never interpolated into SQL.
            insert_query = "INSERT INTO announcements (adjunctUrl, pdf_path) VALUES (%s, %s)"
            self.cursor.execute(insert_query, (adjunctUrl, relative_path))
            self.db.commit()
            self.logger.info("成功保存文件: %s 并插入数据库", file_name)
        except Exception as e:
            # Best-effort per item: one bad download must not kill the crawl.
            self.logger.error("保存文件或插入数据库失败: %s", e)

    def closed(self, reason):
        """Scrapy shutdown hook — release the DB cursor and connection."""
        self.cursor.close()
        self.db.close()
