#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :dataset_info.py
# @Time      :2023/10/19 
# @Author    :CL
# @email     :1037654919@qq.com
import json
import time
import pandas as pd
import requests
from lxml import etree
from bs4 import BeautifulSoup
# Local HTTP/HTTPS proxy used for all huggingface.co requests.
# NOTE(review): hard-coded address/port — confirm a proxy is listening on 15732.
proxies ={'http':'127.0.0.1:15732',
          'https':'127.0.0.1:15732'}
# Browser-like request headers copied from a real Chrome session so the
# scraper is served the same HTML a browser would get.
headers = {
    "authority": "huggingface.co",
    "accept": "*/*",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "referer": "https://huggingface.co/datasets?task_categories=task_categories:feature-extraction&p=1&sort=trending",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}
# Session cookies captured from the same browser session.
# NOTE(review): the aws-waf-token expires — requests will start failing once it
# does; refresh these values from a live session when that happens.
cookies = {
    "__stripe_mid": "e0b77284-b1ae-4576-8f32-a09cbd0c2e7447d809",
    "_ga": "GA1.1.1717933824.1694677947",
    "__stripe_sid": "36ab759b-6b2c-4221-a3f5-a69d0c25ad08757de6",
    "_ga_8Q63TH4CSL": "GS1.1.1697679921.2.1.1697681246.60.0.0",
    "aws-waf-token": "fc77edbf-7de2-49c8-b0ce-b099b6b325fd:EwoAw7IQod0AAAAA:UoftFBdySVRFz1PM31q8YOLsF8bwlaq2Vt9Gog016UC7YgeJY7S7EPaSMkldDFg4ks57Ie/Gdn5xis+FCMiU1yxvFbFi1fKSMajBFi4VgMv3wsZ3OZJcQglf6bJb0Xm+zZ1Lm/eeNSQwK+7XCWiZOqkbRU1ayIm6GMJ9az0W8oag350xT4WcEZCCkF4XiWagBb1s4c2llrWnJuti9KViuV5quGQ4mdjvTfz0+cl1eOyCihl8pNRo"
}
# Project-local Mongo wrapper; collection 'hugging_face_datasets' in db 'datasets'.
from utils import mongo_manager
hugging_face_datasets = mongo_manager('hugging_face_datasets',db = 'datasets')
if __name__ == "__main__":
    # Crawl dataset detail pages for every Mongo record whose 'status' is still
    # None, scraping the like count, metadata line and preview table, then
    # writing the enriched record back.  Runs in pages of 200 until a page
    # comes back short (i.e. fewer than 200 pending records remain).
    # hugging_face_datasets.updateMany({},{'status':None})
    while True:
        seeds = hugging_face_datasets.findAll({'status': None}).limit(200)
        work_list = list(seeds)
        print(len(work_list))
        for data in work_list:
            try:
                url = 'https://huggingface.co/datasets/' + data['id']
                print(f"url:{url}")
                data['url'] = url
                # timeout: without it a single hung connection stalls the
                # whole crawl indefinitely.
                res = requests.get(url, headers=headers, cookies=cookies,
                                   proxies=proxies, timeout=30)
                soups = BeautifulSoup(res.text, 'lxml')
                # Second <header> holds the dataset title block; the like
                # count is buried in its whitespace-separated text.
                info1 = soups.find_all('header')[1].find('h1').text
                like = info1.split('\n\n\n\n')[1].strip().split('\n')[1]
                data['like'] = like
                try:
                    info2 = soups.find_all('header')[1].find('div', class_='mb-3 flex flex-wrap md:mb-4')
                    # find() returns None when the div is absent -> AttributeError.
                    info2 = info2.text.strip().replace('\n\n', '   ').replace('\t', ' ').replace('\n', '')
                    data['info'] = info2
                except AttributeError:
                    pass  # metadata block missing on this dataset page
                # Grab the sample-data preview table, if the page has one.
                try:
                    content = soups.select('table')[0]
                    tbl = pd.read_html(content.prettify(), header=0)[0]
                    # Keep only the first token of each header cell.
                    tbl.columns = [text.split()[0] for text in tbl.columns]
                    data['view'] = json.loads(tbl.to_json())
                except (IndexError, ValueError):
                    pass  # no table on the page / nothing parseable
                data['status'] = 'success'
            except Exception as e:
                # Was `except BaseException`, which swallowed KeyboardInterrupt
                # and made the loop impossible to stop with Ctrl-C.  On failure
                # 'status' stays None so the record is retried on a later pass.
                print(e)
            try:
                hugging_face_datasets.updateOne({'_id': data["_id"]}, data)
            except Exception:
                # Likely cause: integers in the parsed table too large for
                # BSON.  Store the preview as a JSON string and retry once.
                # Guarded: 'view' may be absent if the table parse failed,
                # and the old unconditional access raised KeyError here.
                if 'view' in data:
                    data['view'] = json.dumps(data['view'])
                hugging_face_datasets.updateOne({'_id': data["_id"]}, data)
            # time.sleep(0.1)

        # A short page means no full batch of pending records is left.
        if len(work_list) < 200:
            break
        time.sleep(5)

hugging_face_datasets.close()