import os
import sys

import pandas as pd
import requests
from bs4 import BeautifulSoup
import json
import re
from datetime import datetime
from xlutils.copy import copy
from xlrd import open_workbook
import xlwt
import time
import random

class QccEnterpriseCrawler:
    """Crawler that looks up company registration details on qcc.com.

    Each successful lookup is appended immediately to an .xls workbook so a
    crash mid-run loses at most the record currently being written.
    """

    def __init__(self):
        self.session = requests.Session()
        # Rotate through several desktop user-agent strings so repeated
        # requests look less uniform to the server.
        user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.37 (KHTML, like Gecko) Chrome/94.0.4026.54 Safari/537.36',
              'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/532.32 (KHTML, like Gecko) Chrome/94.0.4026.54 Safari/532.32',
              'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/534.34 (KHTML, like Gecko) Chrome/95.0.4027.54 Safari/532.34',
              'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/532.32 (KHTML, like Gecko) Chrome/92.0.4028.54 Safari/532.30',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Chrome/92.0.4029.33 Safari/529.329',
              'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Edge/96.0.1054.62',
              'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0'
              ]
        # Pool of login cookies — fill in real values before a serious run.
        cookie_pool = [
                  # 'qcc_did=cookie1',
                  # 'qcc_did=cookie2 '
                   ]
        self.headers = {
            'user-agent': random.choice(user_agents),
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'upgrade-insecure-requests': '1',
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'sec-ch-ua': f'"Chromium";v="{random.randint(90, 112)}",  "Google Chrome";v="",  "Not:A-Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': random.choice(['Windows', 'macOS', 'Linux']),
            'accept-encoding': random.choice(['gzip,  deflate, br', 'gzip, deflate']),
            'x-requested-with': 'XMLHttpRequest' if random.random() > 0.7 else '',
            'connection': 'keep-alive',
            # BUG FIX: the Host header must match the site actually queried
            # (query_company targets www.qcc.com); the previous value
            # 'www.riskbird.com' made the server mis-route/reject requests.
            'host': 'www.qcc.com',
            'referer': 'https://www.qcc.com/nsearch?key',
            # BUG FIX: random.choice() raises IndexError on an empty list,
            # which crashed __init__ whenever no cookie was configured.
            'cookie': random.choice(cookie_pool) if cookie_pool else '',  # your cookie
        }

        # Column order shared by the Excel header row and each saved record.
        self.output_columns = [
            "企业名称", "统一信用代码", "法定代表人",
            "注册资本", "成立日期", "经营状态",
            "注册地址", "联系方式", "企业邮箱",
            "企业类型"
        ]
        # Create the output workbook (with header row) if it does not exist.
        self.initialize_output_file("enterprise_info.xls")

    def initialize_output_file(self, filename):
        """Create *filename* with a header row unless it already exists.

        Args:
            filename: path of the .xls workbook to initialize.
        """
        if not os.path.exists(filename):
            wb = xlwt.Workbook()
            # FIX: dropped the stray leading space in the sheet name.
            ws = wb.add_sheet('企业信息')
            # Write the header row.
            for col, header in enumerate(self.output_columns):
                ws.write(0, col, header)
            wb.save(filename)
            # BUG FIX: the original printed a literal placeholder instead of
            # the actual file name.
            print(f"已创建新文件：{filename}")

    def load_companies(self, input_file):
        """Read the list of company names to query from an Excel file.

        Args:
            input_file: workbook whose '单位名称' column holds one name per row.

        Returns:
            list[str]: non-empty company names.
        """
        df = pd.read_excel(input_file)
        return df['单位名称'].dropna().tolist()

    def query_company(self, company_name):
        """Query qcc.com for *company_name* and parse the result page.

        Returns:
            list[dict] | None: parsed records, or None on any failure.
        """
        url = f"https://www.qcc.com/web/search?key={company_name}"

        # Optional politeness delay between requests:
        # time.sleep(random.uniform(3, 20))
        try:
            response = self.session.get(url, headers=self.headers, timeout=15)
            return self.parse_response(response.text)
        except Exception as e:
            # Best-effort crawl: log and keep going with the next company.
            print(f"查询失败: {company_name} - {str(e)}")
            return None

    def save_single_record(self, company_data, output_file):
        """Append one record dict to *output_file* and save immediately.

        Args:
            company_data: mapping keyed by the names in self.output_columns.
            output_file: path of the .xls workbook created by
                initialize_output_file.
        """
        try:
            # xlwt cannot append, so copy the existing workbook via xlutils.
            rb = open_workbook(output_file, formatting_info=True)
            workbook = copy(rb)
            sheet = workbook.get_sheet(0)

            # Next free row = current row count of the original sheet.
            current_row = rb.sheet_by_index(0).nrows

            for col_idx, col_name in enumerate(self.output_columns):
                sheet.write(current_row, col_idx, company_data.get(col_name, ''))

            # Save immediately so a later crash cannot lose this record.
            workbook.save(output_file)
            # BUG FIX: the lookup key had a stray leading space (' 企业名称'),
            # so the confirmation always printed an empty name.
            print(f"已保存：{company_data.get('企业名称', '')}")

        except Exception as e:
            print(f"保存失败：{str(e)}")

    def parse_response(self, html):
        """Extract the embedded JSON state object from the search page HTML.

        Returns:
            list[dict] | None: records from process_search_result, or None
            when the page holds no parsable state.
        """
        soup = BeautifulSoup(html, 'html.parser')
        script = soup.find('script', string=re.compile('window.__INITIAL_STATE__'))
        if not script:
            return None

        # BUG FIX: guard against a non-matching regex — the original called
        # .group(1) directly and raised AttributeError on unexpected markup.
        match = re.search(r'window\.__INITIAL_STATE__\s*=\s*({.*?});',
                          script.string, re.DOTALL)
        if not match:
            return None

        try:
            # strict=False tolerates raw control characters inside strings.
            data = json.loads(match.group(1), strict=False)
        except json.JSONDecodeError:
            return None

        return self.process_search_result(data.get('search', {}).get('searchRes', {}))

    def process_search_result(self, result):
        """Convert the raw search payload into output-column record dicts.

        Only the first hit is kept — the search is assumed to return the best
        match first.

        Args:
            result: the 'searchRes' mapping from the page state.

        Returns:
            list[dict] | None: a single-record list, or None when the lookup
            failed or returned no hits.
        """
        if result.get('Status') != 200:
            return None

        first_item = result.get('Result', [])[0] if result.get('Result') else None
        if not first_item:
            return None

        company_data = {
            # Strip the highlight tag the search wraps around matched text.
            "企业名称": first_item.get('Name', '').replace('<em>', ''),
            "统一信用代码": first_item.get('CreditCode'),
            "法定代表人": first_item.get('OperName'),
            "注册资本": f"{first_item.get('RegistCapi', '')}万元",
            "成立日期": self.format_timestamp(first_item.get('StartDate')),
            "经营状态": first_item.get('Status'),
            "注册地址": first_item.get('Address'),
            "联系方式": first_item.get('ContactNumber'),
            "企业邮箱": first_item.get('Email'),
            "企业类型": first_item.get('EconKind')
        }
        return [company_data]

    def format_timestamp(self, ts):
        """Render an epoch timestamp (seconds or milliseconds) as YYYY-MM-DD.

        Returns '长期' ("indefinite") for missing/zero timestamps and '' for
        values fromtimestamp cannot convert.
        """
        if not ts or ts == 0:
            return '长期'
        try:
            # Values above ~1e12 are millisecond timestamps.
            dt = datetime.fromtimestamp(ts / 1000) if ts > 1e12 else datetime.fromtimestamp(ts)
            return dt.strftime('%Y-%m-%d')
        except (ValueError, OverflowError, OSError, TypeError):
            # BUG FIX: narrowed from a bare except that hid real errors.
            return ''
if __name__ == "__main__":
    crawler = QccEnterpriseCrawler()

    # Input: one company name per row in the '单位名称' column.
    input_file = "爬去单位列表.xlsx"
    output_file = "enterprise_info.xls"

    # Companies still to be queried.
    companies = crawler.load_companies(input_file)

    # Query each company and flush every record to disk immediately.
    # IDIOM FIX: enumerate(start=1) replaces the manual i=0 / i=i+1 counter;
    # printed output is unchanged.
    for i, name in enumerate(companies, start=1):
        print(f"正在查询：{name}{i}")

        if company_data := crawler.query_company(name):
            # One query may yield several records; save each one.
            for record in company_data:
                crawler.save_single_record(record, output_file)
        else:
            print(f"未查询到：{name}")