# https://huggingface.co/api/quicksearch?q=from+diffusers&type=all



# https://huggingface.co/api/quicksearch?q=from+diffusers&type=all

import requests
from pymongo import MongoClient

from top.starp.util import mongo_util
from top.starp.util import k
from top.starp.util import json_util

from top.starp.util import file_util
from top.starp.util import time_util
from top.starp.util import mongo_util
from top.starp.util import time_util
import time


def getStartId(id_collection):
    """Return the next numeric crawl id.

    Looks up the most recently inserted document in *id_collection*
    (sorted by MongoDB ``_id`` descending, i.e. insertion order) and
    returns its ``id`` + 1.  Falls back to 1 when the collection is
    empty.

    :param id_collection: pymongo-like collection whose documents carry a
        numeric ``id`` field (``{'id': <int>}``).
    :return: the id the next crawl iteration should use (``int``).
    """
    last_crawled = id_collection.find_one(sort=[('_id', -1)])
    if last_crawled:
        return last_crawled['id'] + 1
    # Empty collection: nothing crawled yet, start from the beginning.
    return 1
def getStartIdCursor(id_collection, default_start=604350204059149750):
    """Return the next cursor-style crawl id.

    Same lookup as :func:`getStartId` (latest document by ``_id``,
    return its ``id`` + 1), but with a configurable fallback for an
    empty collection instead of the previously hard-coded constant.

    :param id_collection: pymongo-like collection whose documents carry a
        numeric ``id`` field (``{'id': <int>}``).
    :param default_start: value returned when the collection is empty
        (default preserves the original hard-coded cursor seed).
    :return: the cursor id the next crawl iteration should use.
    """
    last_crawled = id_collection.find_one(sort=[('_id', -1)])
    if last_crawled:
        return last_crawled['id'] + 1
    return default_start

# CrawlerUtil
def crawler_data(url_tpl="https://huggingface.co/api/quicksearch?q={ques}&type=all",
                 db_name="huggingface", run_cnt=20, ques="from diffusers",
                 conf_path="/kaggle/input/private-conf/mongodb.json"):
    """Crawl *url_tpl* (with ``{ques}`` substituted) and persist results in MongoDB.

    Each iteration fetches the same URL.  A 200 response is stored in
    ``db[k.data]`` (tagged with the query under ``k.question``) and a
    synthetic crawl id is appended to ``db[k.crawled_ids]``.  Non-200
    responses and network errors are recorded in ``db[k.crawler_err]``.

    :param url_tpl: URL template containing a ``{ques}`` placeholder.
    :param db_name: name of the MongoDB database to write into.
    :param run_cnt: iteration budget.  NOTE(review): the original loop ran
        ``run_cnt + 1`` times; that count is preserved for compatibility.
    :param ques: search query substituted into the template.
    :param conf_path: path to the MongoDB connection config JSON.
    """
    client = mongo_util.getConnectionByConfPath(conf_path)
    try:
        db = client[db_name]
        collection = db[k.data]               # crawled payloads
        id_collection = db[k.crawled_ids]     # ids of finished iterations
        crawler_err_cloc = db[k.crawler_err]  # failed-request log

        time_sleep_sec = 5
        timeout = 3
        now_id = getStartId(id_collection)
        # The URL is loop-invariant for this crawler; build it once.
        url = url_tpl.replace("{ques}", ques)
        print("start ")

        for _ in range(run_cnt + 1):  # original `while` ran run_cnt + 1 times
            print("url", url)
            try:
                response = requests.get(url, timeout=timeout)
                status = response.status_code
            except requests.RequestException as exc:
                # Fix: the original let timeouts / connection errors abort the
                # whole crawl unlogged; treat them like failed responses.
                response = exc
                status = None

            if status == 200:
                data = response.json()
                data[k.question] = ques
                collection.insert_one(data)
                # Record the id so the next run resumes past it.
                id_collection.insert_one({'id': now_id})
            else:
                print(f"请求失败：{status}")
                now_time_str = time_util.get_now_time_str()
                crawler_err_cloc.insert_one({
                    "id": now_id,
                    'now_time_str': now_time_str,
                    "response": str(response),
                    k.question: ques,
                    k.url: url,
                    k.time_sleep_sec: time_sleep_sec,
                })
            now_id += 1
            time.sleep(time_sleep_sec)  # throttle between requests
    finally:
        # Fix: close the connection even when an insert/request raises.
        client.close()



# CrawlerUtil
def crawler_data_wan_xiang(url_tpl="https://huggingface.co/api/quicksearch?q={ques}&type=all",
                           db_name="huggingface", run_cnt=20, ques="from diffusers",
                           conf_path="/kaggle/input/private-conf/mongodb.json",
                           time_sleep_sec=5, cursor_now="1", timeout=3):
    """Cursor-paginated crawl: fetch *url_tpl* and persist each page in MongoDB.

    *url_tpl* may contain ``{ques}`` and ``{cursor}`` placeholders.  On a
    200 response the payload is stored in ``db[k.data]``, the crawl id is
    recorded in ``db[k.crawled_ids]``, and the cursor returned in
    ``data['data']['cursor']`` is used for the next request.  Failures are
    logged to ``db[k.crawler_err]`` and the cursor is left unchanged.

    :param url_tpl: URL template with ``{ques}`` / ``{cursor}`` placeholders.
    :param db_name: name of the MongoDB database to write into.
    :param run_cnt: iteration budget.  NOTE(review): the original loop ran
        ``run_cnt + 1`` times; that count is preserved for compatibility.
    :param ques: search query substituted into the template.
    :param conf_path: path to the MongoDB connection config JSON.
    :param time_sleep_sec: delay between requests, in seconds.
    :param cursor_now: initial pagination cursor.
    :param timeout: per-request timeout passed to ``requests.get``.
    """
    client = mongo_util.getConnectionByConfPath(conf_path)
    try:
        db = client[db_name]
        collection = db[k.data]               # crawled payloads
        id_collection = db[k.crawled_ids]     # cursors already crawled
        crawler_err_cloc = db[k.crawler_err]  # failed-request log

        now_id = getStartIdCursor(id_collection)
        print("start ")

        for _ in range(run_cnt + 1):  # original `while` ran run_cnt + 1 times
            url = url_tpl.replace("{ques}", ques).replace("{cursor}", str(cursor_now))
            print("url", url)
            try:
                response = requests.get(url, timeout=timeout)
                status = response.status_code
            except requests.RequestException as exc:
                # Fix: network errors no longer abort the whole crawl.
                response = exc
                status = None

            if status == 200:
                data = response.json()
                data[k.question] = ques
                collection.insert_one(data)
                id_collection.insert_one({'id': now_id})
                # Fix two bugs from the original:
                #  * `cursor_now` was never updated, so every iteration
                #    requested the identical page;
                #  * `now_id = cursor` ran even on failures, raising
                #    NameError the first time a request failed.
                cursor = data['data']['cursor']
                cursor_now = cursor
                now_id = cursor
            else:
                print(f"请求失败：{status}")
                now_time_str = time_util.get_now_time_str()
                crawler_err_cloc.insert_one({
                    "id": now_id,
                    'now_time_str': now_time_str,
                    "response": str(response),
                    k.question: ques,
                    k.url: url,
                    k.time_sleep_sec: time_sleep_sec,
                })
            time.sleep(time_sleep_sec)  # throttle between requests
    finally:
        # Fix: close the connection even when an insert/request raises.
        client.close()

# start C:\Users\25004\Desktop\recStart.url
# "C:\Users\25004\Desktop\recStart.url"
# crawler_data(db_name="huggingface",run_cnt=20,ques="from diffusers",conf_path="/kaggle/input/private-conf/mongodb.json")