import scrapy
import requests
from bs4 import BeautifulSoup
import re
import time
import random
import json
import os
from datetime import datetime
from ..items import DangdangProductItem

class DangdangProductsSpider(scrapy.Spider):
    """Spider that crawls book listings from dangdang.com category pages.

    Crawls up to 4 pages per category, yields DangdangProductItem objects,
    and records per-run success/error statistics under the 'logs' directory.
    """

    name = 'dangdang_products'
    allowed_domains = ['dangdang.com']
    
    def __init__(self, *args, **kwargs):
        """Set up headers, counters, the log directory and the category map."""
        super(DangdangProductsSpider, self).__init__(*args, **kwargs)
        # Browser-like request headers (desktop Chrome UA) to reduce the
        # chance of being served a bot/captcha page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        }
        # NOTE(review): this requests.Session is never used anywhere in this
        # class (all fetching goes through scrapy.Request) — confirm whether
        # pipelines/middleware rely on it before removing.
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        
        # In-memory error bookkeeping; mirrored to JSON files by log_error()
        # and save_spider_stats().
        self.error_logs = []
        self.success_count = 0
        self.error_count = 0
        self.start_time = datetime.now()
        
        # Ensure the on-disk log directory exists before any write.
        self.log_dir = 'logs'
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        
        # Mapping of dangdang category codes (as found in category URLs)
        # to human-readable Chinese category names; used to fill category3.
        self.category_mapping = {
            'cp01.01': '青春文学',
            'cp01.03': '小说',
            'cp01.05': '文学',
            'cp01.21': '成功/励志',
            'cp01.31': '心理学',
            'cp01.24': '投资理财',
            'cp01.25': '经济',
            'cp01.28': '哲学/宗教',
            'cp01.36': '历史',
            'cp01.38': '传记',
            'cp01.41': '童书',
            'cp01.43': '中小学用书',
            'cp01.47': '考试',
            'cp01.07': '艺术',
            'cp01.45': '外语',
            'cp01.56': '医学',
            'cp01.54': '计算机/网络',
            'cp01.26': '法律',
            'cp01.18': '保健/养生',
            'cp01.27': '政治/军事',
            'cp01.10': '烹饪/美食',
            'cp01.15': '亲子/家教',
            'cp01.32': '古籍',
            'cp01.58': '英文原版书',
            'cp01.63': '工业技术',
            'cp01.50': '工具书',
            'cp01.17': '育儿/早教',
            'cp01.52': '科普读物',
            'cp01.12': '旅游/地图',
            'cp01.62': '自然科学',
            'cp01.09': '动漫/幽默',
            'cp01.20': '手工/DIY',
            'cp01.30': '社会科学',
            'cp01.34': '文化',
            'cp01.16': '两性关系',
            'cp01.14': '家庭/家居',
            'cp01.66': '农业/林业',
            'cp01.19': '体育/运动',
            'cp01.49': '教材',
            'cp01.11': '时尚/美妆',
            'cp01.04': '休闲/爱好',
            'cp01.59': '港台圖書',
            'cp01.76': '网络课程',
            'cp01.79': '老书/收藏',
            'cp01.78': '其他语种原版书',
            'cp01.80': '中小学教科书',
            'cp01.77': '二手书',
            'cp01.75': '法文原版书',
            'cp01.68': '阅读器',
            'cp01.69': '日文原版书',
            'cp01.73': '韩文原版书',
            'cp01.74': '其他',
        }
        
    def log_error(self, error_type, message, details=None):
        """Record a structured error in memory and append it to the JSON log.

        Args:
            error_type: short machine-readable category (e.g. 'field_missing').
            message: human-readable description.
            details: optional dict with extra context (sku, url, page_num, ...).
        """
        error_log = {
            'timestamp': datetime.now().isoformat(),
            'type': error_type,
            'message': message,
            'details': details or {}
        }
        self.error_logs.append(error_log)
        self.error_count += 1
        
        # Persist the full history as one JSON array (read-modify-write;
        # fine for the modest error volume of a single run).
        log_file = os.path.join(self.log_dir, 'spider_errors.json')
        try:
            existing_logs = []
            if os.path.exists(log_file):
                with open(log_file, 'r', encoding='utf-8') as f:
                    try:
                        existing_logs = json.load(f)
                    except (json.JSONDecodeError, ValueError):
                        # A corrupt/partially-written log file previously made
                        # json.load raise and silently dropped the new entry;
                        # start a fresh history instead.
                        existing_logs = []
            if not isinstance(existing_logs, list):
                # Guard against a file that contains valid JSON of the
                # wrong shape (e.g. an object).
                existing_logs = []
            
            existing_logs.append(error_log)
            
            with open(log_file, 'w', encoding='utf-8') as f:
                json.dump(existing_logs, f, ensure_ascii=False, indent=2)
        except Exception as e:
            self.logger.error(f"保存错误日志失败: {e}")
    
    def log_success(self):
        """Bump the counter of products scraped with all required fields."""
        self.success_count = self.success_count + 1
    
    def save_spider_stats(self):
        """Write run statistics (timings and counters) to logs/spider_stats.json."""
        finished_at = datetime.now()
        processed_total = self.success_count + self.error_count
        stats = {
            'start_time': self.start_time.isoformat(),
            'end_time': finished_at.isoformat(),
            'success_count': self.success_count,
            'error_count': self.error_count,
            'total_count': processed_total,
        }
        
        stats_path = os.path.join(self.log_dir, 'spider_stats.json')
        try:
            with open(stats_path, 'w', encoding='utf-8') as fh:
                json.dump(stats, fh, ensure_ascii=False, indent=2)
        except Exception as exc:
            self.logger.error(f"保存统计信息失败: {exc}")
        
    def start_requests(self):
        """Yield the first-page request for every book category.

        The URL list is derived from self.category_mapping instead of a
        second hard-coded list, so category codes and their display names
        stay in a single source of truth.  Dict insertion order preserves
        the original crawl order.  First-page category URLs have the form:
        https://category.dangdang.com/<code>.00.00.00.00.html
        """
        for code in self.category_mapping:
            url = f'https://category.dangdang.com/{code}.00.00.00.00.html'
            # original_url is carried in meta so pagination and category
            # resolution keep working after redirects.
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={'original_url': url, 'page_num': 1},
            )

    def parse(self, response):
        """Parse one category listing page.

        Yields one item per product on the page, then follows the next page
        of the same category up to a hard limit of 4 pages.

        Args:
            response: listing-page response; meta carries 'original_url'
                (the page-1 category URL) and 'page_num' (1-based).
        """
        original_url = response.meta.get('original_url', response.url)
        page_num = response.meta.get('page_num', 1)
        
        try:
            # BeautifulSoup is already imported at module level; the previous
            # duplicate function-local import was removed.
            soup = BeautifulSoup(response.text, 'html.parser')
            
            # Product rows are <li> elements whose id carries the product id.
            products = [self._extract_product(li, original_url)
                        for li in soup.select('li[id*="p"]')]
            
            for product_elem in products:
                missing_fields = self._missing_fields(product_elem)
                if missing_fields:
                    # Required field(s) absent: count as an error, still yield.
                    self.log_error(
                        'field_missing',
                        f"商品字段缺失: {', '.join(missing_fields)}",
                        {
                            'sku': product_elem.get('sku', ''),
                            'missing_fields': missing_fields,
                            'url': original_url,
                            'page_num': page_num
                        }
                    )
                else:
                    self.log_success()
                
                yield self.extract_product_info(product_elem)
            
            self.logger.info(f"页面 {original_url} 抓取了 {len(products)} 个商品，当前页码: {page_num}")
            
            # Pagination: follow pages 2..4 of the same category.
            if page_num < 4:
                next_page_url = self._build_next_page_url(original_url, page_num + 1)
                self.logger.info(f"当前页码: {page_num}, 生成下一页链接: {next_page_url}")
                self.logger.info(f"准备发送下一页请求，页码: {page_num+1}")
                
                next_request = scrapy.Request(
                    url=next_page_url, 
                    callback=self.parse, 
                    meta={'original_url': original_url, 'page_num': page_num+1}
                )
                self.logger.info(f"下一页请求已创建: {next_request.url}")
                yield next_request
            else:
                self.logger.info(f"页面 {original_url} 已达到最大页数限制(4页)")
                
        except Exception as e:
            self.logger.error(f"解析页面失败: {e}")
    
    def _missing_fields(self, product):
        """Return the display labels of required fields that are empty."""
        required = [('name', '商品名称'), ('price', '价格'),
                    ('author', '作者'), ('publisher', '出版社')]
        return [label for key, label in required
                if not product.get(key, '').strip()]
    
    def _extract_product(self, li, original_url):
        """Extract one product's raw string fields from its listing <li>."""
        title_tag = li.select_one('a[name="itemlist-title"]')
        title = title_tag.get_text(strip=True) if title_tag else ''
        link = ''
        if title_tag and title_tag.has_attr('href'):
            href = title_tag['href']
            if isinstance(href, str):
                # Protocol-relative hrefs ('//...') are made absolute.
                link = 'https:' + href if href.startswith('//') else href
        
        # Listing images are lazy-loaded: 'data-original' holds the real URL,
        # 'src' is the placeholder fallback.
        img_tag = li.select_one('a.pic img')
        img_url = ''
        if img_tag:
            img_src = img_tag.get('data-original') or img_tag.get('src') or ''
            if isinstance(img_src, str):
                img_url = 'https:' + img_src if img_src.startswith('//') else img_src
        
        price_tag = li.select_one('p.price span.search_now_price')
        price = price_tag.get_text(strip=True) if price_tag else ''
        original_price_tag = li.select_one('p.price span.search_pre_price')
        original_price = original_price_tag.get_text(strip=True) if original_price_tag else ''
        
        # Author / publisher / publish date share one <p> element.
        author = ''
        publisher = ''
        publish_date = ''
        author_tag = li.select_one('p.search_book_author')
        if author_tag:
            author_link = author_tag.select_one('a[name="itemlist-author"]')
            if author_link:
                author = author_link.get_text(strip=True)
            publisher_link = author_tag.select_one('a[name="P_cbs"]')
            if publisher_link:
                publisher = publisher_link.get_text(strip=True)
            # The date appears in a span like ' /2023-11-01'.
            for span in author_tag.find_all('span'):
                span_text = span.get_text(strip=True)
                if '/' in span_text and len(span_text) > 4 and '-' in span_text:
                    date_text = span_text.replace('/', '').strip()
                    if len(date_text) == 10 and date_text.count('-') == 2:
                        publish_date = date_text
                        break
        
        comment_tag = li.select_one('a.search_comment_num')
        comment_num = comment_tag.get_text(strip=True) if comment_tag else ''
        detail_tag = li.select_one('p.detail')
        description = detail_tag.get_text(strip=True) if detail_tag else ''
        
        # The <li> id is the product id prefixed with 'p' (e.g. 'p29138257').
        # Strip only the leading 'p' — the previous replace('p', '') also
        # removed interior 'p' characters from non-standard ids.
        sku = ''
        if li.has_attr('id'):
            li_id = li['id']
            sku = li_id[1:] if li_id.startswith('p') else li_id
        
        # Third-level category comes from the category code in the page URL.
        category3 = ''
        for code, name in self.category_mapping.items():
            if code in original_url:
                category3 = name
                break
        
        return {
            'sku': sku,
            'name': title,
            'price': price,
            'original_price': original_price,
            'author': author,
            'publisher': publisher,
            'publish_date': publish_date,
            'image_url': img_url,
            'description': description,
            'comment_num': comment_num,
            'category1': '全部',
            'category2': '图书',
            'category3': category3,
            'link': link
        }
    
    @staticmethod
    def _build_next_page_url(original_url, next_page):
        """Build dangdang's paginated category URL (the 'pgN-' prefix scheme)."""
        if 'cp01.' in original_url:
            # e.g. cp01.01.00.00.00.00.html
            category_part = original_url.split('/')[-1]
            return f"https://category.dangdang.com/pg{next_page}-{category_part}"
        # Fallback for unexpected URL shapes.
        return original_url.replace('category.dangdang.com/', f'category.dangdang.com/pg{next_page}-')
    
    def extract_product_info(self, product_elem):
        """Build a DangdangProductItem from the raw field dict parse() produced.

        String fields are copied through with falsy values normalised to ''
        (the original code did this only for some fields); price fields are
        parsed to float (0.0 fallback) and the comment count to int (0
        fallback).

        Args:
            product_elem: dict of raw string fields for one product.

        Returns:
            A populated DangdangProductItem.
        """
        item = DangdangProductItem()
        
        # Plain pass-through string fields.
        for field in ('sku', 'name', 'author', 'publisher', 'publish_date',
                      'image_url', 'description', 'category1', 'category2',
                      'category3', 'link'):
            item[field] = product_elem.get(field) or ''
        
        # Numeric fields share two small parsers instead of three copies of
        # the same regex logic.
        item['price'] = self._parse_price(product_elem.get('price', ''))
        item['original_price'] = self._parse_price(product_elem.get('original_price', ''))
        item['comments'] = self._parse_count(product_elem.get('comment_num', ''))
        
        return item
    
    @staticmethod
    def _parse_price(text):
        """Extract a float from price text like '¥39.50'; 0.0 when absent."""
        if text:
            match = re.search(r'¥?(\d+\.?\d*)', text)
            if match:
                return float(match.group(1))
        return 0.0
    
    @staticmethod
    def _parse_count(text):
        """Extract the first integer from text (e.g. '1234条评论'); 0 when absent."""
        if text:
            match = re.search(r'(\d+)', text)
            if match:
                return int(match.group(1))
        return 0
    
    def closed(self, reason):
        """Scrapy lifecycle hook: persist final run statistics on shutdown."""
        self.save_spider_stats()
        summary = f"爬虫结束，成功抓取: {self.success_count}，错误: {self.error_count}"
        self.logger.info(summary)