import requests
import re
import json
import pandas as pd
import time
import random
from urllib.parse import quote
import logging
from fake_useragent import UserAgent
import os

# Logging setup: timestamped, INFO-level messages shared by the whole module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class TaobaoScraper:
    """Scraper for Taobao product-search results.

    Requires a valid logged-in Taobao cookie. Every request carries
    browser-like headers plus a randomized User-Agent, and random delays
    are inserted between requests, to reduce the chance of triggering
    Taobao's anti-bot checks.
    """

    def __init__(self, cookie):
        """
        Initialize the scraper.
        :param cookie: Taobao cookie string captured after logging in.
        """
        self.cookie = cookie
        # Randomize the User-Agent for each scraper instance.
        ua = UserAgent()
        self.headers = {
            'authority': 's.taobao.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'cache-control': 'max-age=0',
            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': ua.random,
            'Cookie': self.cookie
        }

    def search_products(self, keyword, page=1, page_size=44):
        """
        Search for products by keyword, sorted by sales (descending).
        :param keyword: search keyword
        :param page: 1-based page number
        :param page_size: number of items per page (Taobao default is 44)
        :return: list of raw product dicts; empty list on any failure
        """
        # URL-encode the keyword for the query string.
        encoded_keyword = quote(keyword)

        # Offset of the first result on the requested page.
        start = (page - 1) * page_size

        url = f'https://s.taobao.com/search?q={encoded_keyword}&s={start}&sort=sale-desc'

        logger.info(f"正在爬取: {url}")

        # Random delay to avoid anti-scraping detection.
        time.sleep(random.uniform(2, 5))

        try:
            response = requests.get(url, headers=self.headers, timeout=15)
            response.raise_for_status()

            # Save the raw response to a file for debugging.
            with open('taobao_response.html', 'w', encoding='utf-8') as f:
                f.write(response.text)

            logger.info(f"响应状态码: {response.status_code}")

            # A redirect to the login page means the cookie has expired.
            if 'login.taobao.com' in response.url:
                logger.error("被重定向到登录页面，Cookie可能已失效")
                return []

            # Try several regexes for the embedded JSON payload.
            patterns = [
                r'g_page_config\s*=\s*(\{.+?\});\s*',  # standard format
                r'window\.g_config\s*=\s*(\{.+?\});\s*',  # fallback format 1
                r'{"mainInfo":.+?"itemList":.+?}',  # fallback format 2 (no capture group)
            ]

            data = None
            for pattern in patterns:
                logger.info(f"尝试使用正则: {pattern}")
                matches = re.search(pattern, response.text, re.DOTALL)
                if matches:
                    # BUG FIX: the third pattern has no capture group, so
                    # group(1) raised IndexError (uncaught by the JSON
                    # handler) and aborted the whole search. Fall back to
                    # group(0) when the pattern defines no groups.
                    payload = matches.group(1) if matches.re.groups else matches.group(0)
                    try:
                        data = json.loads(payload)
                        logger.info("成功匹配并解析数据")
                        break
                    except json.JSONDecodeError as e:
                        logger.warning(f"JSON解析失败: {e}")
                        continue

            if not data:
                # No embedded JSON found: scrape the HTML directly.
                logger.info("尝试直接搜索商品信息")
                items = self._extract_items_directly(response.text)
                if items:
                    return items
                else:
                    logger.error("未找到商品数据")
                    return []

            # The item list has lived at different paths across page versions.
            possible_paths = [
                ['mods', 'itemlist', 'data', 'auctions'],
                ['data', 'auctions'],
                ['itemList', 'auctions'],
            ]

            items = []
            for path in possible_paths:
                try:
                    current = data
                    for key in path:
                        if key in current:
                            current = current[key]
                        else:
                            break
                    else:
                        # for/else: only runs when every key was found.
                        if isinstance(current, list):
                            items = current
                            logger.info(f"成功从路径 {path} 获取到商品列表")
                            break
                except Exception as e:
                    logger.warning(f"路径 {path} 提取失败: {e}")
                    continue

            if not items:
                # Still nothing: fall back to the alternative JSONP API.
                logger.info("使用备用API获取数据")
                return self._search_products_alternative(keyword, page)

            return items

        except Exception as e:
            # Best-effort scraper: log and return empty rather than crash.
            logger.error(f"爬取过程中出错: {e}")
            return []

    def _extract_items_directly(self, html_content):
        """
        Extract product fields straight from the HTML with regexes.
        :param html_content: raw search-result page HTML
        :return: list of product dicts (may be empty)
        """
        try:
            items = []

            # Product titles.
            titles = re.findall(r'<div class="row row-2 title">.*?<a.*?>(.*?)</a>', html_content, re.DOTALL)

            # Prices.
            prices = re.findall(r'<strong>([\d.]+)</strong>', html_content)

            # Sales counts.
            sales = re.findall(r'<div class="deal-cnt">(.*?)</div>', html_content)

            # Shop names.
            shops = re.findall(r'<a.*?class="shop-name".*?>(.*?)</a>', html_content)

            # Product detail links.
            links = re.findall(r'<div class="row row-2 title">.*?<a href="(.*?)"', html_content, re.DOTALL)

            # Only build items up to the shortest list so fields stay aligned.
            min_length = min(len(titles), len(prices), len(sales), len(shops), len(links))

            if min_length > 0:
                logger.info(f"直接提取到 {min_length} 个商品信息")

                for i in range(min_length):
                    item = {
                        'raw_title': titles[i] if i < len(titles) else '',
                        'view_price': prices[i] if i < len(prices) else '',
                        'view_sales': sales[i] if i < len(sales) else '',
                        'nick': shops[i] if i < len(shops) else '',
                        'detail_url': links[i] if i < len(links) else '',
                    }
                    items.append(item)

                return items

            return []
        except Exception as e:
            logger.error(f"直接提取商品信息失败: {e}")
            return []

    def _search_products_alternative(self, keyword, page=1):
        """
        Search via Taobao's legacy JSONP API as a fallback.
        :param keyword: search keyword
        :param page: 1-based page number
        :return: list of raw product dicts; empty list on any failure
        """
        try:
            encoded_keyword = quote(keyword)

            # Legacy JSONP search endpoint; `_ksTS` is a cache-busting timestamp.
            url = f'https://s.taobao.com/api?_ksTS={int(time.time() * 1000)}_1&callback=jsonp&q={encoded_keyword}&js=1&stats_click=search_radio_all&initiative_id=staobaoz_20201107&ie=utf8&sort=sale-desc&bcoffset=0&p4ppushleft=,44&s={(page - 1) * 44}'

            logger.info(f"使用备用API: {url}")

            # The API expects a Referer from the search page.
            headers = self.headers.copy()
            headers['Referer'] = f'https://s.taobao.com/search?q={encoded_keyword}'

            time.sleep(random.uniform(3, 6))

            response = requests.get(url, headers=headers, timeout=15)
            response.raise_for_status()

            # Strip the jsonp(...) wrapper to get the JSON body.
            json_str = re.search(r'jsonp\((.*)\)', response.text)
            if not json_str:
                logger.error("备用API未返回有效数据")
                return []

            data = json.loads(json_str.group(1))
            items = data.get('mods', {}).get('itemlist', {}).get('data', {}).get('auctions', [])

            if items:
                logger.info(f"备用API成功获取到 {len(items)} 个商品")
                return items
            else:
                logger.error("备用API未找到商品数据")
                return []

        except Exception as e:
            logger.error(f"备用API调用失败: {e}")
            return []

    def parse_products(self, items, limit=20):
        """
        Normalize raw product dicts into a uniform schema.
        :param items: raw product list (from any of the fetch strategies)
        :param limit: maximum number of products to keep
        :return: list of normalized product dicts
        """
        products = []
        count = 0

        if not items:
            logger.warning("没有商品数据可解析")
            return []

        logger.info(f"开始解析 {min(len(items), limit)} 个商品")

        for item in items:
            if count >= limit:
                break

            try:
                # Field names differ between fetch strategies; try both.
                if isinstance(item, dict):
                    product = {
                        'rank': count + 1,
                        'title': item.get('raw_title', '') or item.get('title', ''),
                        'price': item.get('view_price', '') or item.get('price', ''),
                        'sales': self._format_sales(item.get('view_sales', '') or item.get('sales', '')),
                        'shop_name': item.get('nick', '') or item.get('shop', ''),
                        'location': item.get('item_loc', '') or item.get('location', ''),
                        'detail_url': self._format_url(item.get('detail_url', '') or item.get('url', '')),
                        'pic_url': self._format_url(item.get('pic_url', '') or item.get('img', ''))
                    }

                    products.append(product)
                    count += 1
                    logger.info(f"已解析商品 {count}: {product['title']}")
                else:
                    logger.warning(f"跳过非字典项: {item}")
                    continue

            except Exception as e:
                logger.error(f"解析商品时出错: {e}")
                continue

        return products

    def _format_sales(self, sales_str):
        """Normalize a sales-count string (e.g. '1.5万人付款') to a plain number string."""
        if not sales_str:
            return "0"

        # Strip Chinese suffixes like "people paid" / "transactions".
        for suffix in ['人付款', '人已付款', '人收货', '笔交易']:
            sales_str = sales_str.replace(suffix, '')

        # '万' means tens of thousands.
        if '万' in sales_str:
            try:
                number = float(sales_str.replace('万', '').strip()) * 10000
                return str(int(number))
            except ValueError:
                # Unparseable numeric part: return the cleaned string as-is.
                return sales_str.strip()

        return sales_str.strip()

    def _format_url(self, url):
        """Ensure a URL is absolute with an https scheme; empty input yields ''."""
        if not url:
            return ""

        if url.startswith('//'):
            # Protocol-relative URL.
            return 'https:' + url
        elif not url.startswith('http'):
            # Bare host/path.
            return 'https://' + url

        return url

    def save_to_csv(self, products, filename='taobao_products.csv'):
        """
        Save product dicts to a CSV file (utf-8-sig so Excel opens it cleanly).
        :param products: list of normalized product dicts
        :param filename: output CSV path
        :return: True on success, False otherwise
        """
        if not products:
            logger.warning("没有商品数据可保存")
            # Return False (not None) so callers get a consistent bool.
            return False

        try:
            df = pd.DataFrame(products)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # BUG FIX: the filename placeholder was lost from this message.
            logger.info(f"数据已保存到 {filename}")

            # Summary of what was written.
            logger.info(f"成功保存 {len(products)} 条记录")
            logger.info(f"数据列: {', '.join(df.columns)}")

            return True
        except Exception as e:
            logger.error(f"保存CSV文件时出错: {e}")
            return False


def fake_cookie_for_test():
    """Build a fake cookie string for development/testing purposes."""
    hex_alphabet = "abcdef0123456789"
    tb_token = "".join(random.choices(hex_alphabet, k=16))
    cookie2 = "".join(random.choices(hex_alphabet, k=32))
    fields = [
        f"t={int(time.time() * 1000)}",
        f"_tb_token_={tb_token}",
        f"cookie2={cookie2}",
        "_samesite_flag_=true",
    ]
    return "; ".join(fields)


def main():
    """Entry point: scrape the top-20 best-selling items for a keyword and save to CSV."""
    # Welcome banner.
    print("=" * 50)
    print("淘宝商品爬虫 - 竞品分析工具")
    print("=" * 50)

    # NOTE(review): a live session cookie is hard-coded here — it is a
    # credential and should be moved to an environment variable or config
    # file rather than committed to source control.
    # BUG FIX: the original literal was split across two physical lines,
    # which is a SyntaxError for a single-quoted string; joined here with
    # implicit string concatenation.
    cookie = (
        'miid=6105404320101034920; t=bc29a478f7188ba025c51626159ca9a1; cna=8v+gIBvknUwCAXW1aXnum5BN; thw=hk; xlly_s=1; sca=4aa2482e; _tb_token_=e5317ba5ba948; arms_uid=72b63928-c33c-4aa5-9733-11218e4c2cc7; _samesite_flag_=true; 3PcFlag=1747904750578; cookie2=1d4274f4939c12449ac3587702bf0cd6; sgcookie=E100m6rXWqEdXdfKJqsY2Vmek3BfS9sInYf5tuFnNjkWFWx4I7UuRKtevcfdPJ%2FV32AGRlTCXtsGUyTnahoYRxwVWCQEjNlcVbSOY1xnLYDB1Z4%3D; wk_cookie2=1dda742471fc9a2da93757ce5f2b0763; wk_unb=UUphzOvGaD3if65qlw%3D%3D; unb=2206930072547; uc1=cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D&pas=0&cookie15=UIHiLt3xD8xYTw%3D%3D&existShop=false&cookie21=U%2BGCWk%2F7pY%2FF&cookie14=UoYajLyLJT%2BQjg%3D%3D; uc3=id2=UUphzOvGaD3if65qlw%3D%3D&nk2=F5RMGyZfmXdb%2B8s%3D&vt3=F8dD2f%2FHdkMAWm0pfbk%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D; csg=6d569628; lgc=tb925770606; cancelledSubSites=empty; cookie17=UUphzOvGaD3if65qlw%3D%3D; dnk=tb925770606; skt=8b10da363be2a3f4; existShop=MTc0NzkwNDc4NQ%3D%3D; uc4=id4=0%40U2grF8CKZuOXLnSpzIbF5sDUcyl0nqqW&nk4=0%40FY4HXgw7lyGZ1%2FSOp2rg0E%2FCRnhvFw%3D%3D; tracknick=tb925770606; _cc_=URm48syIZQ%3D%3D; _l_g_=Ug%3D%3D; sg=677; _nk_=tb925770606; cookie1=AC%2FIzK%2FN7oa7iXYNCmS1D09mNQH0XM9ATwPpQ9biwjU%3D; aui=2206930072547; bxuab=0; mtop_partitioned_detect=1; _m_h5_tk=e57e7b82d5481cbd6e367b4e95f203a5_1747918294360; _m_h5_tk_enc=b1cedd413c31815740b3c5672da0b484; JSESSIONID=5DD42235B1B0A0B5D23A4CFB4FF776C0; tfstk=gh7mzwqY5i-XE3eJ2aTbxXZnLYZRkET6cO39BFpa4LJSkhJTcOxGBTtNkECAEdWdICQAWNjSS611kSTTGE1X5FyLpyUGGsT_s4A6ZZYyzIdaWxJa2UyxaF48pyUdGoRXW_ydWE8joiRK7F-2b4zyFC0ZgIWw44R6UqRZuCkzZL_ikfT9cFl5dwjXDII9IU1etxMtfoZiw67BHs9aGUN5BIoVBZpv_h7D1xO2hO-yaCdZgI-w_UzyFC8wQO8aZUJWsFJZhyi6Kt6dz8M8MVlpIXbWinvFgK5AMazp4m1DEELVrIbDLsos7V7kin7Pv2vk3nINOH_AuP0Wk6jyzBCbo4YFgBXve9zErU5RaOKCVRgpVTbMbZ-j_7JPoTbXutEZ-9YcIH72qfuOt_JDrQ7U6D9RrKBNo3PK-hLPJH8V2uwvXEAhQZ6msVWFw6QXVN2nUdsvOURdGr0k8sRN4OorYAwE5QPOzci60QOkpn8GYqUyc58LZ7mQznRWGzVuZci60QOkp7VoY0-2NIal.; '
        'isg=BK-vatsoGwmMPB87NJKQbC_xPsO5VAN2OLGEg8E-i54lEM4SySA1xueGkgAuXNvu'
    )
    if not cookie.strip():
        logger.warning("使用测试Cookie，功能可能受限")
        cookie = fake_cookie_for_test()

    keyword = 'elementor pro'

    # Directory for result files.
    os.makedirs('results', exist_ok=True)

    scraper = TaobaoScraper(cookie)

    # Fetch the first page, sorted by sales.
    items = scraper.search_products(keyword)

    # Top up from the second page if the first did not yield enough.
    if len(items) < 20:
        logger.info("第一页数据不足，尝试爬取第二页")
        items2 = scraper.search_products(keyword, page=2)
        items.extend(items2)

    # Keep the top 20 products.
    products = scraper.parse_products(items, limit=20)

    if products:
        filename = f'results/taobao_{keyword.replace(" ", "_")}_top20.csv'
        success = scraper.save_to_csv(products, filename)

        if success:
            print(f"成功爬取 {keyword} 的前20个商品数据")
            # BUG FIX: the filename placeholder was lost from this message.
            print(f"数据已保存到: {filename}")
        else:
            print("数据保存失败，请查看日志了解详情")
    else:
        print(f"未能爬取到 {keyword} 的商品数据")
        print("可能原因: Cookie无效、关键词太小众、网络问题或淘宝反爬机制升级")
        print("建议: 更新Cookie、尝试热门关键词、检查网络连接")


# Script entry point: only run the scraper when executed directly, not on import.
if __name__ == "__main__":
    main()
