import threading
import time

from lxml import html

from setting import *
# from tools import *
from util.data_utils import *

def main(key_url, group_name):
    """Poll a Douban group's topic-list page forever and persist new topics.

    Args:
        key_url: URL of the group's topic-list page to poll.
        group_name: Human-readable group name; used in log output and
            passed through to ``fetch_data``.

    Never returns: loops indefinitely with a 20-second pause between
    successful rounds and a short back-off after failures.
    """
    while True:
        print(getCurTime(), f"【{group_name} 开始】")
        try:
            response = ownRequest(key_url, headers=HEADERS, proxies=PROXIES)
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt /
            # SystemExit still propagate and can stop the thread.
            print("多次获取列表页面失败，采集下一个...")
            # Back off briefly so a persistent failure doesn't become a
            # tight loop hammering the site.
            time.sleep(1)
            continue
        if not response:
            print(getCurTime(), "获取列表页面失败...")
            time.sleep(1)
            continue

        tree = html.fromstring(response.text)
        # XPath: every topic link in the list table, skipping the header row.
        urls = tree.xpath("//*[@id='group-topics']/div[2]/table/tr[position()>1]/td[1]/a/@href")

        for url in urls:
            # Topic URLs end with a trailing slash, e.g.
            # https://www.douban.com/group/topic/327504110/?_spm_id=...
            # so the numeric topic id is the second-to-last path segment.
            unique_id = url.split("/")[-2]

            # Skip topics already collected (deduplicated via a Redis set).
            if R.sismember("unique_set_douban", unique_id):
                continue

            data = fetch_data(group_name, unique_id, url)
            # Articles discovered by this crawler (not pushed to us) get
            # push_task_id 0.
            data["push_task_id"] = 0

            # Record the id only after a successful database write, so a
            # failed write is retried on the next round.
            if toDatabase(data):
                R.sadd("unique_set_douban", unique_id)

        print(getCurTime(), f"【{group_name} 结束】")
        time.sleep(20)


if __name__ == "__main__":
    # Group list pages to crawl as [url, group name] pairs.
    # "买组" = buy group; "拼组" (group-buy) is currently disabled.
    key_url_lis = [
        ["https://www.douban.com/group/698716/?tab=51757", "买组"],
        # ["https://www.douban.com/group/536786/?tab=47726", "拼组"]
                ]

    # One crawler thread per group page.
    threads = [
        threading.Thread(target=main, args=tuple(entry))
        for entry in key_url_lis
    ]
    for worker in threads:
        worker.start()

    # Block until every crawler thread finishes (they loop forever, so
    # this effectively keeps the main thread alive).
    for worker in threads:
        worker.join()
