#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :hugging_face_datasets.py
# @Time      :2023/10/19 
# @Author    :CL
# @email     :1037654919@qq.com
from random import random

import time
import  json
from lxml import etree
from bs4 import BeautifulSoup
from utils import mongo_manager
import pandas as pd
# MongoDB collection handle used to persist scraped dataset metadata.
hugging_face_datasets = mongo_manager('hugging_face_datasets', db='datasets')

import requests

# Local proxy endpoints. Fixed: requests expects full proxy URLs including the
# scheme ("http://host:port"); scheme-less values are not reliably honored.
# NOTE(review): `proxies` is never passed to requests.get() below — wire it up
# via `proxies=proxies` if the proxy is actually required.
proxies = {
    'http': 'http://127.0.0.1:15732',
    'https': 'http://127.0.0.1:15732',
}

# Browser-like headers so the JSON endpoint serves the same payload as the site.
headers = {
    "authority": "huggingface.co",
    "accept": "*/*",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "referer": "https://huggingface.co/datasets?task_categories=task_categories:feature-extraction&p=1&sort=trending",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}

# Session cookies captured from a logged-in browser session.
# NOTE(review): the aws-waf-token expires; refresh these when requests start failing.
cookies = {
    "__stripe_mid": "e0b77284-b1ae-4576-8f32-a09cbd0c2e7447d809",
    "_ga": "GA1.1.1717933824.1694677947",
    "__stripe_sid": "36ab759b-6b2c-4221-a3f5-a69d0c25ad08757de6",
    "_ga_8Q63TH4CSL": "GS1.1.1697679921.2.1.1697681246.60.0.0",
    "aws-waf-token": "fc77edbf-7de2-49c8-b0ce-b099b6b325fd:EwoAw7IQod0AAAAA:UoftFBdySVRFz1PM31q8YOLsF8bwlaq2Vt9Gog016UC7YgeJY7S7EPaSMkldDFg4ks57Ie/Gdn5xis+FCMiU1yxvFbFi1fKSMajBFi4VgMv3wsZ3OZJcQglf6bJb0Xm+zZ1Lm/eeNSQwK+7XCWiZOqkbRU1ayIm6GMJ9az0W8oag350xT4WcEZCCkF4XiWagBb1s4c2llrWnJuti9KViuV5quGQ4mdjvTfz0+cl1eOyCihl8pNRo"
}
def get_datasets(params=None):
    """Fetch one page of dataset listings from the Hugging Face JSON endpoint.

    Args:
        params: Query-string parameters (page number ``p``, ``sort``, and an
            optional ``task_categories`` filter). Defaults to the trending
            feature-extraction query with no page number.

    Returns:
        The parsed JSON payload (dict) on HTTP 200, otherwise ``None``.
    """
    # None-sentinel instead of a mutable dict default: a dict default would be
    # shared (and mutable) across every call to this function.
    if params is None:
        params = {
            "task_categories": "task_categories:feature-extraction",
            "p": None,
            "sort": "trending",
        }
    url = "https://huggingface.co/datasets-json"

    # timeout prevents the crawl loop from hanging forever on a stalled
    # connection. (Removed `requests.session().close()` — it created a brand
    # new Session and closed it immediately, a no-op unrelated to this request.)
    response = requests.get(url, headers=headers, cookies=cookies,
                            params=params, timeout=30)
    print(response)
    if response.status_code == 200:
        return response.json()
    return None
if __name__ == "__main__":
    # Pages that failed during the previous full crawl (range(712, 2362));
    # this run retries only those.
    fail_pages = [925, 971, 1241, 1288, 1370, 1396, 1510, 1622, 1816, 1889,
                  2111, 2130, 2216, 2239, 2294]
    for page in fail_pages:
        print(f'page:{page}')
        params = {
            # "task_categories": "task_categories:feature-extraction",
            "p": page,
            "sort": "trending"
        }
        try:
            text = get_datasets(params=params)
            datas = text['datasets']
            for data in datas:
                # Use the Hugging Face dataset id as the Mongo primary key so
                # re-crawls upsert instead of duplicating documents.
                data['_id'] = data['id']
                data['url'] = 'https://huggingface.co/datasets/' + data['id']
                try:
                    hugging_face_datasets.insertOne(data)
                except Exception:
                    # Insert fails on duplicate _id: the record already exists,
                    # so refresh it in place. (Narrowed from BaseException.)
                    hugging_face_datasets.updateOne({'_id': data["_id"]}, data)
            # Random 0-10s pause between pages to stay polite to the server.
            time.sleep(random() * 10)
        except Exception as e:
            # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt)
            # still stops the run; log the cause instead of swallowing it.
            print(f'fail page:{page}')
            print(e)
            time.sleep(10)

    # Close the Mongo connection only when run as a script. Previously this
    # call sat at module level and would also run on `import`, killing the
    # shared connection for any importer.
    hugging_face_datasets.close()