
# -*- coding: utf-8 -*-

import datetime
import os
import sys
import time

# Make the script's sibling directories importable when it is run directly
# (so `from scraping_code import ...` and `import env` resolve regardless of CWD).
curr_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(curr_dir, "."))
sys.path.append(os.path.join(curr_dir, "./datas"))
sys.path.append(os.path.join(curr_dir, "./docs"))
sys.path.append(os.path.join(curr_dir, "./scraping_code"))

from scraping_code import operation_file
import env
from scraping_code import scraping


def init_scrap_file(source_file, tem_file):
    """Build a fresh scraping-progress workbook from the source keyword list.

    Reads the '编号' (code) and '名称' (name) columns from ``source_file``,
    removes any progress file left over from a previous run, then writes a
    new progress sheet whose url / scrap_time / status columns start empty.

    Returns the path of the progress file (``tem_file``).
    """
    source_df = operation_file.get_scraping_title(source_file, usecols=['编号', '名称'])

    # Start from a clean slate: discard any stale progress file.
    if os.path.exists(tem_file):
        os.remove(tem_file)

    # Progress-sheet column -> source column (None means "create empty column").
    column_map = {
        "registration_code": '编号',
        "title": "名称",
        "url": None,
        "scrap_time": None,
        "status": None,
    }
    operation_file.write_temp_data(source_df, tem_file, column_map)
    return tem_file


def scrape_products_by_keywords(file_path):
    """Scrape product info for each pending keyword in the progress workbook.

    Reads ``file_path`` (Excel with columns registration_code / title /
    status), skips rows whose status is already 1, and scrapes the rest
    sequentially with a single TaobaoProductScraper. For each keyword it
    saves images/details under env.SCRAPING_DATA_DIR and writes the outcome
    back into the same workbook via operation_file.update_excel_row.
    (A thread-pool variant exists below but is currently commented out.)
    """
    def _scrape_keyword(keywords):
        """Scrape every (code -> keyword) pair in ``keywords`` with one scraper instance."""
        scraper = scraping.TaobaoProductScraper()
        for code_value, keyword in keywords.items():
            print(f"==============start {code_value} {keyword}===============")
            time.sleep(1)  # throttle between keywords to reduce anti-bot pressure
            _status = 0  # 0 = failed/incomplete, 1 = fully scraped (see below)
            _product_url = None
            try:
                # print(f"\n开始处理关键词: {keyword}")
                # NOTE(review): scraping_time is first bound here, inside the try,
                # but is consumed by update_excel_row after the except block — if
                # this very line raised, the update would hit a NameError. Confirm
                # and consider binding a default before the try.
                scraping_time = datetime.datetime.now().strftime(r"%Y-%m-%d %H:%M:%S")
                # Search for the product; falsy result means nothing was found.
                _product_url = scraper.search_products(keyword)
                if not _product_url:
                    print(f"未找到关键词 '{keyword}' 的商品")
                    # NOTE(review): `continue` skips update_excel_row, so this row
                    # is left unrecorded and will be retried next run — presumably
                    # intentional (retry semantics); verify.
                    continue

                # Fetch the product detail page.
                product_info = scraper.get_product_details(_product_url)
                # product_info = {"carousel_images": list(), "description_images": list()}
                if not product_info:
                    print(f"无法获取关键词 '{keyword}' 的商品详情")
                    continue

                # One output directory per product: "<code> <keyword>".
                save_path = f"""{env.SCRAPING_DATA_DIR}/{code_value} {keyword}"""
                operation_file.create_dir(save_path)
                # Persist scraped images/details for this product.
                operation_file.save_product_info(product_info, save_path)
                txt_file = os.path.join(save_path, "商品信息.txt")
                params_dict = {
                    "product_name": keyword,
                    "detail_url": _product_url,
                    "time": scraping_time
                    }
                operation_file.write_to_text(txt_file, params_dict)
                # Mark success only when both image sets were captured.
                if product_info.get('carousel_images') and product_info.get('description_images'):
                    _status = 1
            except Exception as e:
                # Best-effort per keyword: log and fall through so the row is
                # still recorded with _status = 0.
                print(f"处理关键词 '{keyword}' 时出错: {str(e)}")
            
            # Record the outcome for this keyword in the progress workbook.
            operation_file.update_excel_row(file_path, code_value, keyword, _product_url, scraping_time, _status)
            print(f"==============end {code_value} {keyword}===============")
        scraper.close()
        scraper = None
    
    print("================== start scrape_products_by_keywords ==================")
    # Thread pool (disabled; sequential run below)

    # Pending work: every row not already marked done (status may be int or str).
    df_sp = operation_file.get_scraping_title(file_path, sheet_name=0, header=0, usecols=['registration_code', 'title', 'status'])
    keyword_datas = {row['registration_code']: row['title'] for _, row in df_sp.iterrows() if row['status'] not in [1, '1']}
    # keyword_datas = {"1": "iPhone 17", "2": "羽绒服", "3": "皮鞋", "4": "衬衫", "5": "毛衣", 
    #                  "6": "月饼", "7": "5090ti显卡", "8": "美的空调", "9": "华为手机"} 
    _scrape_keyword(keyword_datas)

    # Multi-threaded variant (kept for reference; requires ThreadPoolExecutor import)
    # keyword_groups = operation_file.convert_dict_to_grouped_list_2(keyword_datas, group_size=5) # type: list
    # # keyword_groups = [{"q1": "羽绒服"}]
    # with ThreadPoolExecutor(max_workers=3) as executor:  # 可以根据需要调整max_workers
    #     executor.map(_scrape_keyword, keyword_groups)

    # Drop references to the DataFrame / dict before returning.
    df_sp = None
    keyword_datas = None
    print("================== end scrape_products_by_keywords ==================")


def main():
    """Entry point: rebuild the progress workbook, then scrape pending keywords.

    Fix: the scraping step previously used a hard-coded
    './docs/scraping_progress.xlsx' path, which can diverge from the file
    actually created by init_scrap_file (env.TEM_FILE). Use the path
    returned by init_scrap_file so both steps always operate on the same
    workbook.
    """
    progress_file = init_scrap_file(env.SOURCE_FILE, env.TEM_FILE)
    scrape_products_by_keywords(progress_file)


# Script entry point.
if __name__ == "__main__":
    main()

