# _*_ coding:utf-8 _*_
"""
:File: scrapper_amazon_data.py
:Author: cfp
:Date: 2025/9/9 16:43
:LastEditTime: 2025/9/9 16:43
:LastEditors: cfp
:Description:
"""

import asyncio
import zlib
from datetime import datetime
from urllib.parse import urlsplit

from asgiref.sync import sync_to_async
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone

from common.core import RedisUrlPool
from scrapper.models.amazon_product_data import AmazonProductData
from scrapper.models.listing import AmazonListing


class GetFast(RedisUrlPool):
    """Async crawler: queues URLs into a Redis-backed URL pool and persists
    parsed results as ``AmazonProductData`` rows."""

    def __init__(self, host="127.0.0.1", db=0, port=6379, groups=None):
        """
        :param host: Redis host the URL pool lives on.
        :param db: Redis database index.
        :param port: Redis port.
        :param groups: ``AmazonListing`` rows to crawl; one URL is queued per
            row. Defaults to a fresh empty list — the original signature used
            a mutable default (``groups=[]``), which is shared across all
            calls; a ``None`` sentinel is the backward-compatible fix.
        """
        super().__init__(host=host, db=db, port=port)
        self._redisKey = "BaiduList"  # Redis key of the URL pool
        self._max_workers = 1  # number of concurrent crawl tasks
        self.groups: list[AmazonListing] = [] if groups is None else groups

    async def load_url(self):
        """Queue one URL item per listing into the pool.

        http://www.httpbin.org/delay/2 takes ~2 seconds to respond, so
        queuing N of them demonstrates the concurrency win over fetching
        them sequentially (~2s total instead of ~2s * N).
        NOTE(review): the listing itself is not used yet — every entry
        queues the same demo URL; presumably the real per-ASIN product URL
        belongs here. TODO confirm.
        """
        for _item in self.groups:
            url_item = {
                "url": "http://www.httpbin.org/delay/2",
                "backfunc": "parse_baidu",  # callback resolved by name
            }
            await self._addurl(url_item)

    async def parse_baidu(self, r):
        """Callback for a fetched page: persist one ``AmazonProductData`` row.

        :param r: result dict from the crawler; must contain an ``"html"`` key.
        """
        print(r.keys())
        # The Django ORM is synchronous; wrap the create call with
        # sync_to_async so it is safe to await from the event loop.
        create_product_data = sync_to_async(AmazonProductData.objects.create)
        await create_product_data(
            asin="example_asin",  # TODO: extract from crawl result
            title="example_title",  # TODO: extract from crawl result
            price=99.99,  # TODO: extract from crawl result
            status=1,
            is_delete=0,
            crawl_time=timezone.now(),
        )
        print("收到html长度：", len(r["html"]))

    async def run(self):
        """Load the URL queue, then start the crawl loop."""
        await self.load_url()
        await self.crawl_main()


class Command(BaseCommand):
    help = "执行拉取amazon数据"

    def split_asin_groups(self, asin_list, num_groups=24):
        """Partition ASINs into ``num_groups`` buckets, stably per ASIN.

        Uses CRC32 instead of the builtin ``hash()``: string hashing is
        randomized per process (PYTHONHASHSEED), so ``hash(asin)`` did NOT
        keep an ASIN in the same group across runs, contradicting this
        method's stated contract.

        :param asin_list: iterable of ASIN strings.
        :param num_groups: number of buckets (default 24, one per hour).
        :return: list of ``num_groups`` lists of ASINs.
        """
        groups = [[] for _ in range(num_groups)]
        for asin in asin_list:
            # crc32 is deterministic across processes and machines.
            group_idx = zlib.crc32(str(asin).encode("utf-8")) % num_groups
            groups[group_idx].append(asin)
        return groups

    def handle(self, *args, **options):
        """Entry point: crawl every active listing (status=1, not deleted)."""
        # Parse the Redis URL with urlsplit. The original chained .split()
        # calls were buggy: for "redis://host:6379/1", split(":")[1] yielded
        # "6379/1" — the port silently carried the db suffix.
        redis_location = settings.CACHES["default"]["LOCATION"]
        parsed = urlsplit(redis_location)
        host = parsed.hostname or "127.0.0.1"
        port = str(parsed.port or 6379)
        db = parsed.path.lstrip("/") or "0"
        # 1. Listings that still need crawling (status=1 and is_delete=0).
        asin_queryset = AmazonListing.objects.filter(status=1, is_delete=0)
        groups = list(asin_queryset)
        # 2. Optionally spread work over 24 hourly groups:
        # groups = self.split_asin_groups(asin_list, num_groups=24)
        if not groups:
            return
        # 3. Run the crawler. asyncio.run() replaces the deprecated
        # get_event_loop()/run_until_complete pair and closes the loop.
        baidu = GetFast(host=host, port=port, db=db, groups=groups)
        asyncio.run(baidu.run())
