import csv
import requests
import yaml
import time
import random
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from queue import Queue

class CSVProgressCrawler:
    """Two-phase crawler for Meituan store/qualification data.

    Phase 1 pages through the store listing endpoint; phase 2 fetches
    qualification details for every collected poiId. Rows are funnelled
    through queues to background writer threads that flush to CSV in
    batches, so network workers never block on disk I/O.
    """

    def __init__(self, config_path='config.yaml'):
        # Maps the API's numeric licence ``type`` to a human-readable name;
        # unmapped codes fall back to "未知类型_{type}" at lookup time.
        self.license_type_mapping = {
            201: "营业执照",
            202: "食品经营许可证(有限责任公司)",
            222: "餐饮服务许可证",
            335: "食品经营许可证(个体工商户)",
            2028: "其他小餐饮经营许可证"
        }
        self.config = self.load_config(config_path)
        self.session = requests.Session()
        self.session.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
            'Cookie': self.config.get('cookie', '')
        }
        self.base_url = "https://ecom.meituan.com"
        self.max_workers = self.config.get('max_workers', 15)
        self.batch_size = self.config.get('batch_size', 100)
        self.retry_limit = 3  # max retry attempts per request (set before any fetch can run)
        self.store_queue = Queue()
        self.qual_queue = Queue()
        self.all_poi_ids = []
        self._init_csv_files()
        # Keep references to the writer threads so run() can join them after
        # sending the None sentinels. They are daemon threads; without the
        # join, process exit could kill them before the final partial batch
        # is flushed to disk (silent data loss).
        self._writer_threads = [
            threading.Thread(target=self._writer,
                             args=(self.store_queue, 'stores.csv'), daemon=True),
            threading.Thread(target=self._writer,
                             args=(self.qual_queue, 'qualifications.csv'), daemon=True),
        ]
        for t in self._writer_threads:
            t.start()

    def load_config(self, path):
        """Load the YAML config at *path*; fall back to defaults on error."""
        try:
            with open(path, 'r') as f:
                return yaml.safe_load(f)
        except (FileNotFoundError, yaml.YAMLError) as e:
            print(f"⚠️ 配置错误: {e}，使用默认配置")
            return {'cookie': '', 'max_workers': 10, 'batch_size': 100}

    def _init_csv_files(self):
        """Create (truncate) both output files and write their header rows."""
        # utf-8-sig so Excel detects the encoding of the Chinese content.
        with open('stores.csv', 'w', newline='', encoding='utf-8-sig') as f:
            csv.writer(f).writerow(['poiId', 'poiName', 'address', 'province', 'city', 'certificationStatusDesc'])
        with open('qualifications.csv', 'w', newline='', encoding='utf-8-sig') as f:
            # licenseType column distinguishes the qualification categories.
            csv.writer(f).writerow(['poiId', 'number', 'name', 'address', 'owner', 'isForever', 'licenseType', 'businessScope', 'expirationPeriod'])

    def _writer(self, queue, filename):
        """Drain *queue*, flushing rows to *filename* in ``batch_size`` batches.

        Shared implementation for both output files (previously two
        duplicated methods). A ``None`` sentinel terminates the thread
        after flushing any partial batch.
        """
        batch = []
        while True:
            item = queue.get()
            if item is None:
                if batch:
                    self._write_batch(filename, batch)
                return
            batch.append(item)
            if len(batch) >= self.batch_size:
                self._write_batch(filename, batch)
                batch = []

    def _write_batch(self, filename, batch):
        """Append *batch* (a list of row lists) to *filename*."""
        with open(filename, 'a', newline='', encoding='utf-8-sig') as f:
            writer = csv.writer(f)
            writer.writerows(batch)

    def fetch_stores(self, page_no, attempt=1):
        """Fetch one page of the store listing.

        Records each store's poiId in ``self.all_poi_ids`` and returns the
        raw list of store dicts ([] on HTTP error or after retries are
        exhausted). Retries with linear back-off up to ``retry_limit``.
        """
        url = f"{self.base_url}/emis/gw/TQualificationService/getPoiQualificationStatusPageV2"
        payload = {"pageNo": page_no, "pageSize": 50, "certificationStatus": 4}
        try:
            resp = self.session.post(
                url,
                json=payload,
                params={'_tm': int(time.time() * 1000)},  # cache-buster timestamp
                timeout=15
            )
            if resp.status_code == 200:
                # `or []` guards against the API returning "data": null,
                # which .get('data', []) would pass through as None.
                data = resp.json().get('data') or []
                self.all_poi_ids.extend(
                    store['poiId'] for store in data if 'poiId' in store
                )
                return data
            return []
        except Exception as e:
            if attempt <= self.retry_limit:
                time.sleep(2 * attempt)  # linear back-off: 2s, 4s, 6s
                return self.fetch_stores(page_no, attempt + 1)
            print(f"❌ 第{page_no}页数据获取失败: {str(e)}")
            return []

    def fetch_qualifications(self, poi_id, attempt=1):
        """Fetch the qualification rows for one store.

        Returns a list of CSV-ready row lists ([] on failure). Sleeps a
        random 0.3-0.8s before each call to avoid hammering the endpoint.
        """
        time.sleep(random.uniform(0.3, 0.8))
        url = f"{self.base_url}/gw/nibcus/poiQualification/get?poiId={poi_id}"
        try:
            resp = self.session.get(url, timeout=15)
            if resp.status_code != 200:
                return []

            # `or {}` guards against "data": null in the response body.
            data = resp.json().get('data') or {}
            qualifications = data.get('qualifications') or []
            results = []

            for item in qualifications:
                # Map the numeric type code to its name; unmapped codes are
                # kept visible as "未知类型_{type}" rather than dropped.
                license_type = self.license_type_mapping.get(
                    item.get('type'),
                    f"未知类型_{item.get('type')}"
                )

                results.append([
                    poi_id,
                    item.get('number', ''),
                    item.get('name', ''),
                    item.get('address', ''),
                    item.get('owner', ''),
                    item.get('isForever', 0),
                    license_type,  # mapped licence-type name
                    item.get('businessScope', ''),
                    item.get('expirationPeriod', '')
                ])
            return results
        except Exception as e:
            if attempt <= self.retry_limit:
                time.sleep(1 * attempt)  # linear back-off: 1s, 2s, 3s
                return self.fetch_qualifications(poi_id, attempt + 1)
            print(f"❌ 资质获取失败: {poi_id} - {str(e)}")
            return []

    def run(self):
        """Run both crawl phases, then shut down the writer threads cleanly."""
        # ===== Phase 1: crawl all store-listing pages =====
        print("⏳ 开始爬取门店列表...")
        total_pages = 130

        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = {executor.submit(self.fetch_stores, page): page
                       for page in range(1, total_pages + 1)}

            for future in tqdm(as_completed(futures), total=total_pages, desc="📦 门店分页进度"):
                stores = future.result()
                for store in stores:
                    # .get(..., '') so one malformed store record cannot
                    # raise and discard the rest of the page.
                    self.store_queue.put([
                        store.get('poiId', ''),
                        store.get('poiName', ''),
                        store.get('address', ''),
                        store.get('province', ''),
                        store.get('city', ''),
                        store.get('certificationStatusDesc', '')
                    ])

        # ===== Phase 2: crawl qualification details for every store =====
        print(f"⏳ 开始爬取{len(self.all_poi_ids)}家门店资质详情...")
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            futures = {executor.submit(self.fetch_qualifications, poi_id): poi_id
                      for poi_id in self.all_poi_ids}

            with tqdm(total=len(self.all_poi_ids), desc="🔍 资质总进度") as pbar:
                for future in as_completed(futures):
                    for qual_row in future.result():
                        self.qual_queue.put(qual_row)
                    pbar.update(1)

        # Signal the writer threads to flush and exit, then wait for them —
        # joining is what guarantees the final partial batches reach disk
        # before the process terminates.
        self.store_queue.put(None)
        self.qual_queue.put(None)
        for t in self._writer_threads:
            t.join()
        print("✅ 数据采集完成！")
        print(f"📁 门店数据保存至: stores.csv")
        print(f"📁 资质数据保存至: qualifications.csv")

if __name__ == "__main__":
    # Build the crawler with the default config path and run both phases.
    CSVProgressCrawler().run()