# -*- coding:utf-8 -*-

import requests
import uuid
import time

from MySQLDatabase import MySQLDatabase

def get_info(start_date, end_date, page_num=1):
    """Query one page of annual-report announcements from cninfo.

    Posts the historical-announcement search form and returns the decoded
    JSON payload (a dict that includes 'totalpages' and 'announcements').

    Args:
        start_date: Range start, 'YYYY-MM-DD'.
        end_date:   Range end, 'YYYY-MM-DD'.
        page_num:   1-based result page to fetch (30 rows per page).

    Returns:
        dict: the parsed JSON response.

    Raises:
        Exception: on a non-200 status, a network error, or a JSON decode
            failure, with a 'get_info error: ...' message.
    """
    url = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
    data = {
        'pageNum': page_num,
        'pageSize': 30,
        'column': 'szse',
        'tabName': 'fulltext',
        'plate': '',
        'stock': '',
        'searchkey': '',
        'secid': '',
        'category': 'category_ndbg_szsh',  # annual reports category
        'trade': '',
        'seDate': '{}~{}'.format(start_date, end_date),
        'sortName': '',
        'sortType': '',
        'isHLtitle': 'true'
    }

    # NOTE: no hard-coded Content-Length here — the form body size varies
    # with the dates and page number, and requests computes it correctly.
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Host': 'www.cninfo.com.cn',
        'Origin': 'http://www.cninfo.com.cn',
        'Referer': 'http://www.cninfo.com.cn/new/commonUrl/pageOfSearch?url=disclosure/list/search&lastPage=index',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'
    }
    try:
        # timeout prevents the crawler from hanging forever on a stalled server
        r = requests.post(url, data=data, headers=headers, timeout=30)
        if r.status_code != 200:
            raise Exception(f'HTTP Code: {r.status_code}')
        return r.json()
    except Exception as e:
        raise Exception(f'get_info error: {str(e)}') from e

def get_total_pages_num(start_date, end_date):
    """Return the total number of result pages for the given date range.

    Issues a first-page query via get_info and reads the 'totalpages'
    field from the response.
    """
    try:
        return get_info(start_date=start_date, end_date=end_date)['totalpages']
    except Exception as e:
        raise Exception(f'get_total_pages_num error: {str(e)}')

def crawl_url(start_date, end_date):
    """Crawl all announcement pages in the date range and store them in MySQL.

    For each result page, collects one row per announcement
    (uuid, secCode, secName, orgId, announcementId, announcementTime,
    announcementTitle, base_url, adjunctUrl, 0) and bulk-inserts it via
    MySQLDatabase.insert_data. Sleeps 1s between non-empty pages to
    throttle requests.

    Raises:
        Exception: wrapped as 'crawl_url error: ...' on any failure.
    """
    try:
        total_pages_num = get_total_pages_num(start_date=start_date, end_date=end_date)
        if total_pages_num <= 0:
            return
        db = MySQLDatabase()
        db.connect()
        try:
            base_url = 'http://static.cninfo.com.cn/'
            for page_num in range(1, total_pages_num + 1):
                print(f'\n爬取第 {page_num} 页')
                page_result = get_info(start_date=start_date, end_date=end_date, page_num=page_num)
                announcements = page_result['announcements']
                if not announcements:
                    continue
                data_to_insert = []
                for announcement in announcements:
                    # avoid shadowing the builtin `id`
                    row_id = str(uuid.uuid4())
                    # secCode/secName may come back as JSON null — store '' instead
                    sec_code = announcement['secCode'] if announcement['secCode'] is not None else ''
                    sec_name = announcement['secName'] if announcement['secName'] is not None else ''
                    data_to_insert.append((
                        row_id,
                        sec_code,
                        sec_name,
                        announcement['orgId'],
                        announcement['announcementId'],
                        str(announcement['announcementTime']),
                        announcement['announcementTitle'],
                        base_url,
                        announcement['adjunctUrl'],
                        0,  # processed flag, initially unprocessed
                    ))
                if data_to_insert:
                    db.insert_data(data_to_insert)
                time.sleep(1)  # be polite to the server between pages
        finally:
            # always release the connection, even if a page fails mid-crawl
            db.close()
    except Exception as e:
        raise Exception(f"crawl_url error: {str(e)}")

if __name__ == '__main__':
    # History: full calendar years 2000 through 2024 and the range
    # 2025-01-01 ~ 2025-02-28 were crawled in earlier runs; only the
    # latest window remains active below.
    date_ranges = [('2025-03-01', '2025-05-11')]
    for start_date, end_date in date_ranges:
        print(f"\n({start_date}, {end_date})")
        crawl_url(start_date, end_date)
