import os
import requests
import time
import random
import json
from urllib.parse import urljoin
from datetime import datetime, timedelta
import sys
import logging

"""
   北交所定期财报
   用法1: python bse.py (使用默认3年范围)
   用法2: python bse.py 开始日期 结束日期 (格式: YYYY-MM-DD)
"""

def get_next_log_filename(base_filename):
    """Return the first unused log file name under logs/bse.

    Starts with "<base>.log" and, on collision, tries "<base>_1.log",
    "<base>_2.log", ... until a name that does not exist yet is found.
    """
    candidate = f"{base_filename}.log"
    suffix = 1
    while os.path.exists(os.path.join("logs", "bse", candidate)):
        candidate = f"{base_filename}_{suffix}.log"
        suffix += 1
    return candidate

def setup_logging():
    """Configure root logging to write to stdout and a dated file.

    Creates logs/bse under the current working directory if needed,
    picks a non-clashing file name via get_next_log_filename, and
    installs both a UTF-8 file handler and a stdout handler.
    """
    # Make sure the log directory exists before attaching the file handler.
    log_dir = os.path.join(os.getcwd(), "logs", "bse")
    os.makedirs(log_dir, exist_ok=True)

    # File name is "bse" + today's date, with an _n suffix on collision.
    stamp = datetime.now().strftime('%Y%m%d')
    log_path = os.path.join(log_dir, get_next_log_filename(f"bse{stamp}"))

    file_handler = logging.FileHandler(log_path, encoding='utf-8')
    console_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[file_handler, console_handler],
    )
    return logging.getLogger()

# Initialize logging once at import time; everything below logs through this.
logger = setup_logging()

# Request configuration: BSE announcement query endpoint (returns JSONP).
base_url = "https://www.bse.cn/disclosureInfoController/companyAnnouncement.do"

def get_random_user_agent():
    """Return one desktop browser User-Agent string chosen at random."""
    return random.choice((
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
    ))

# Browser-like request headers (spoofed User-Agent) sent with every request.
# NOTE(review): the UA is picked once at import time, not per request.
headers = {
    "User-Agent": get_random_user_agent(),
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Referer": "https://www.bse.cn/disclosure/announcement.html",
    "Connection": "keep-alive"
}

def get_date_range():
    """Resolve the (start, end) query date range from the command line.

    With no arguments, defaults to January 1 of three years ago through
    today. With exactly two arguments, they are validated as YYYY-MM-DD
    start/end dates. Any invalid usage logs an error and exits(1).
    """
    argc = len(sys.argv)

    # No arguments: default to roughly the last three calendar years.
    if argc == 1:
        now = datetime.now()
        window_start = (now - timedelta(days=3 * 365)).replace(month=1, day=1)
        return window_start.strftime("%Y-%m-%d"), now.strftime("%Y-%m-%d")

    # Anything other than exactly two date arguments is a usage error.
    if argc != 3:
        logger.error("错误：参数数量不正确")
        logger.info("用法1: python bse.py (使用默认3年范围)")
        logger.info("用法2: python bse.py 开始日期 结束日期 (格式: YYYY-MM-DD)")
        sys.exit(1)

    # Validate both arguments; strptime raises ValueError on a bad format.
    try:
        start_date, end_date = (
            datetime.strptime(arg, "%Y-%m-%d").strftime("%Y-%m-%d")
            for arg in sys.argv[1:3]
        )

        # Lexicographic comparison is correct for zero-padded ISO dates.
        if start_date > end_date:
            logger.error("错误：开始日期不能晚于结束日期")
            sys.exit(1)

        return start_date, end_date

    except ValueError as e:
        logger.error(f"错误：日期格式不正确 - {e}")
        logger.info("请使用 YYYY-MM-DD 格式的日期，例如: 2025-01-01")
        sys.exit(1)

def build_params(page=0):
    """Assemble the query parameters for one results page of the API.

    Includes a jQuery-style JSONP callback name derived from the current
    millisecond timestamp and the date window from get_date_range().
    """
    first_date, last_date = get_date_range()
    millis = int(time.time() * 1000)
    return {
        # JSONP callback name; the server wraps its JSON in this function call.
        "callback": f"jQuery331_{millis}",
        "isNewThree": "1",
        "xxfcbj[]": "2",
        "needFields[]": ["companyCd", "companyName", "disclosureTitle",
                         "disclosurePostTitle", "destFilePath", "publishDate"],
        "sortfield": "xxssdq",
        "sorttype": "asc",
        "startTime": first_date,
        "endTime": last_date,
        # Disclosure subtype codes for periodic reports (annual/interim etc.).
        "disclosureSubtype[]": [
            "9503-1001", "9503-1005", "9503-1002", "9503-1006",
            "9503-1003", "9504-8001", "9503-1004", "9504-2106"
        ],
        "page": page
    }

def random_delay():
    """Sleep a randomized interval to mimic human browsing cadence.

    Picks one of three duration bands (short/medium/long), and about one
    request in ten additionally pauses 7-8 seconds.
    """
    bands = [
        random.uniform(0.5, 1),
        random.uniform(2, 3),
        random.uniform(4, 6),
    ]
    time.sleep(random.choice(bands))

    # Occasional extra-long pause (~10% of calls).
    if random.randint(1, 10) == 1:
        time.sleep(random.uniform(7, 8))

def download_file(url, filepath):
    """Download *url* to *filepath* atomically via a .tmp intermediate.

    Streams the response in 8 KiB chunks into "<filepath>.tmp" and only
    renames it into place after the whole body is written, so a partial
    file never sits at the final path. All failures are logged, never
    raised to the caller.
    """
    try:
        random_delay()
        # FIX: a timeout is required — requests.get without one can block
        # forever on a stalled connection. (connect, read) seconds.
        # "with" ensures the connection is released back to the pool.
        with requests.get(url, stream=True, headers=headers,
                          timeout=(10, 60)) as response:
            response.raise_for_status()

            os.makedirs(os.path.dirname(filepath), exist_ok=True)

            # Write to a temporary path first, publish by rename on success.
            temp_filepath = filepath + '.tmp'
            try:
                with open(temp_filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)

                os.rename(temp_filepath, filepath)
                logger.info(f"成功下载: {filepath}")

            except Exception:
                # Clean up the partial temp file, then re-raise into the
                # outer handler. Bare raise preserves the original traceback.
                if os.path.exists(temp_filepath):
                    os.remove(temp_filepath)
                raise

    except Exception as e:
        # Defensive: remove anything (possibly incomplete) at the final path.
        if os.path.exists(filepath):
            try:
                os.remove(filepath)
                logger.warning(f"已删除不完整文件: {filepath}")
            except Exception as delete_error:
                logger.error(f"删除文件失败 {filepath}: {delete_error}")

        logger.error(f"下载失败 {url}: {e}")

def parse_response(text):
    """Parse a JSONP (or plain JSON) response body into a Python object.

    Strips a "callback( ... )" wrapper when one is present, then decodes
    the JSON payload. Returns None (and logs) on any parse failure.

    FIX: the original sliced text[text.find('(')+1:text.rfind(')')]
    unconditionally, so a plain-JSON body with no wrapper (find == -1)
    was mangled and always failed; now the wrapper is only stripped when
    both parentheses are actually found.
    """
    try:
        start = text.find('(')
        end = text.rfind(')')
        if start != -1 and end != -1 and start < end:
            text = text[start + 1:end]
        return json.loads(text)
    except Exception as e:
        logger.error(f"解析响应失败: {e}")
        return None

def process_announcements():
    """Fetch the announcement list page by page and download each report PDF.

    Queries the BSE announcement endpoint once to learn the total count,
    derives the page count, then walks every page and saves each report to
    report/bse/<code>_<name>/<title>_<date>.pdf. Existing files are skipped;
    per-announcement failures are logged and do not abort the run.
    """
    try:
        # First request (page 0) is used only to discover the total count.
        params = build_params(0)
        random_delay()
        response = requests.get(base_url, headers=headers, params=params)
        response.raise_for_status()

        data = parse_response(response.text)
        if not data or len(data) == 0:
            logger.error("没有获取到有效数据")
            return

        list_info = data[0].get("listInfo", {})
        total_count = list_info.get("totalElements", 0)
        logger.info(f"总公告数: {total_count}")

        if total_count == 0:
            logger.info("没有找到公告")
            return

        # Ceiling division: number of pages at the server's page size.
        page_size = list_info.get("size", 20)
        total_pages = (total_count + page_size - 1) // page_size

        # Walk every page (the API is 0-indexed).
        for page_num in range(0, total_pages):
            logger.info(f"\n正在处理第 {page_num + 1}/{total_pages} 页...")
            params = build_params(page_num)

            random_delay()
            response = requests.get(base_url, headers=headers, params=params)
            response.raise_for_status()

            page_data = parse_response(response.text)
            if not page_data or len(page_data) == 0:
                logger.warning(f"第 {page_num + 1} 页没有获取到有效数据")
                continue

            list_info = page_data[0].get("listInfo", {})
            announcements = list_info.get("content", [])

            # Process each announcement on this page.
            for announcement in announcements:
                try:
                    sec_code = announcement.get("companyCd", "")
                    sec_name = announcement.get("companyName", "")
                    title = announcement.get("disclosureTitle", "")
                    publish_date = announcement.get("publishDate", "")
                    attach_path = announcement.get("destFilePath", "")

                    # Skip records missing any required field.
                    if not all([sec_code, sec_name, title, publish_date, attach_path]):
                        logger.warning(f"跳过不完整数据: {announcement}")
                        continue

                    # Per-company directory, with filesystem-unsafe chars removed.
                    dir_name = f"{sec_code}_{sec_name}"
                    dir_name = "".join(c for c in dir_name if c not in '\\/:*?"<>|')
                    base_dir = os.path.join(os.getcwd(), "report", "bse")
                    dir_path = os.path.join(base_dir, dir_name)

                    # File name from title + publish date, sanitized the same way.
                    file_name = f"{title}_{publish_date}.pdf"
                    file_name = "".join(c for c in file_name if c not in '\\/:*?"<>|')
                    file_path = os.path.join(dir_path, file_name)

                    # Idempotency: skip files already downloaded on a previous run.
                    if os.path.exists(file_path):
                        logger.info(f"文件已存在，跳过下载: {file_path}")
                        continue

                    # Resolve the relative attachment path against the site root.
                    download_url = urljoin("https://www.bse.cn/", attach_path)
                    download_file(download_url, file_path)

                except Exception as e:
                    logger.error(f"处理公告时出错: {e}")
                    continue

    except Exception as e:
        logger.error(f"处理过程中出错: {e}")
        raise

if __name__ == "__main__":
    try:
        # Banner marking the start of a run in the log file.
        logger.info("="*50)
        logger.info(f"程序开始运行 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        logger.info(f"命令行参数: {sys.argv}")

        process_announcements()

        logger.info("处理完成")
        logger.info(f"程序结束运行 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        logger.info("="*50)
    except KeyboardInterrupt:
        # Ctrl-C: exit with a non-zero status so callers can detect the abort.
        logger.warning("\n用户中断操作")
        sys.exit(1)
    except Exception as e:
        # Any unhandled error is logged before exiting non-zero.
        logger.error(f"程序运行出错: {e}")
        sys.exit(1)