#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :dataset_info_pool.py
# @Time      :2023/10/20 
# @Author    :CL
# @email     :1037654919@qq.com

import json
import re
import time
import pandas as pd
import requests
from lxml import etree
from bs4 import BeautifulSoup
from multiprocessing import Pool

# Local forward proxy for both HTTP and HTTPS traffic (a tunnel listening on
# 127.0.0.1:15732) — all requests to huggingface.co are routed through it.
proxies ={'http':'127.0.0.1:15732',
          'https':'127.0.0.1:15732'}
# Browser-like headers copied from a Chrome session so huggingface.co serves
# the regular HTML dataset pages to the scraper.
headers = {
    "authority": "huggingface.co",
    "accept": "*/*",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "referer": "https://huggingface.co/datasets?task_categories=task_categories:feature-extraction&p=1&sort=trending",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\"",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}
# Cookies captured from a real browser session, including the aws-waf-token.
# NOTE(review): these look session-scoped and presumably expire — confirm they
# are refreshed before long crawl runs.
cookies = {
    "__stripe_mid": "e0b77284-b1ae-4576-8f32-a09cbd0c2e7447d809",
    "_ga": "GA1.1.1717933824.1694677947",
    "__stripe_sid": "36ab759b-6b2c-4221-a3f5-a69d0c25ad08757de6",
    "_ga_8Q63TH4CSL": "GS1.1.1697679921.2.1.1697681246.60.0.0",
    "aws-waf-token": "fc77edbf-7de2-49c8-b0ce-b099b6b325fd:EwoAw7IQod0AAAAA:UoftFBdySVRFz1PM31q8YOLsF8bwlaq2Vt9Gog016UC7YgeJY7S7EPaSMkldDFg4ks57Ie/Gdn5xis+FCMiU1yxvFbFi1fKSMajBFi4VgMv3wsZ3OZJcQglf6bJb0Xm+zZ1Lm/eeNSQwK+7XCWiZOqkbRU1ayIm6GMJ9az0W8oag350xT4WcEZCCkF4XiWagBb1s4c2llrWnJuti9KViuV5quGQ4mdjvTfz0+cl1eOyCihl8pNRo"
}
from utils import mongo_manager
def get_info(data):
    """Scrape one Hugging Face dataset page and write the record back to Mongo.

    ``data`` is a Mongo document containing at least ``id`` and ``_id``.
    Each page section (header metadata, sample-viewer table, description,
    file sizes) is scraped independently and best-effort; the enriched
    document is then persisted through the module-level
    ``hugging_face_datasets`` manager.
    """
    try:
        url = 'https://huggingface.co/datasets/' + data['id']
        print(f"url:{url}")
        data['url'] = url
        # timeout keeps a stalled proxy/connection from hanging a pool worker forever
        res = requests.get(url, headers=headers, cookies=cookies,
                           proxies=proxies, timeout=30)
        soups = BeautifulSoup(res.text, 'lxml')
        # Metadata key/value pairs from the page header.
        try:
            lists = soups.find_all('header')[1].find('div', class_='mb-3 flex flex-wrap md:mb-4').find_all('div')
            info2 = [ll.text.strip().replace('\n\n', '   ').replace('\t', ' ').replace('\n', '')
                     for ll in lists if ':' in ll.text]
            # Split on the FIRST colon only (same fix as get_info_20231031):
            # dict(item.split(':')) raised whenever a value itself contained ':'.
            info_dict = {}
            for item in info2:
                key, _, value = item.partition(':')
                info_dict[key.strip()] = value.strip()
            data['info'] = info_dict
        except Exception:
            pass  # header section missing/different on some pages; keep going
        # Sample rows from the dataset-viewer table -> data['view'].
        try:
            datass  = soups.find('div',class_ ='px-2.5')
            content = datass.select('table')[0]
            tbl = pd.read_html(content.prettify(), header=0)[0]
            # column captions carry extra tokens; keep only the first word
            tbl.columns = [text.split()[0] for text in tbl.columns]
            data['view'] = json.loads(tbl.to_json())
        except Exception:
            pass  # no viewer table for this dataset
        # First descriptive section, stored as raw HTML.
        try:
            datass = soups.find('div', class_='2xl:pr-6')
            data['desc'] = str(datass)
        except Exception:
            pass
        # Per-file/per-split sizes from the sidebar links.
        try:
            size = {}
            datass = soups.find('div', class_='flex flex-col flex-wrap xl:flex-row').find_all('a')
            for datas in datass:
                name = datas.find('div').get_text()
                size[name] = datas.find_all('div')[-1].get_text()
            data['size'] = size
        except Exception as e:
            print('size error', e)
        data['status'] = 'success'
    except Exception as e:
        # Exception, not BaseException: let KeyboardInterrupt/SystemExit
        # propagate so pool workers can be interrupted cleanly.
        print(e)
    try:
        hugging_face_datasets.updateOne({'_id': data["_id"]}, data)
    except Exception:
        # Likely cause: 'view' holds integers too large for BSON; retry with
        # the view serialized to a JSON string. If 'view' was never set the
        # failure has some other cause — re-raise instead of masking it with
        # a KeyError (the original code crashed here when 'view' was absent).
        if 'view' not in data:
            raise
        data['view'] = json.dumps(data['view'])
        hugging_face_datasets.updateOne({'_id': data["_id"]}, data)
    # brief pause between pages to stay polite to the site
    time.sleep(0.1)
def get_info_20231031(data):
    """Re-scrape a dataset page and rebuild its ``info`` field as a dict.

    Repair pass for documents whose ``info`` was stored as a string: key/value
    pairs are split on the FIRST colon only, so values that themselves contain
    ':' (URLs, timestamps, ...) survive intact — ~2700 records broke the naive
    ``dict(item.split(':'))`` approach.

    ``data`` must carry ``id`` and ``_id``; the document is written back via
    the module-level ``hugging_face_datasets`` manager with ``status`` set to
    'success' when scraping worked.
    """
    try:
        url = 'https://huggingface.co/datasets/' + data['id']
        print(f"url:{url}")
        data['url'] = url
        # timeout keeps a stalled proxy/connection from hanging a pool worker forever
        res = requests.get(url, headers=headers, cookies=cookies,
                           proxies=proxies, timeout=30)
        soups = BeautifulSoup(res.text, 'lxml')
        # Extract the key/value metadata rows from the page header.
        try:
            lists = soups.find_all('header')[1].find('div', class_='mb-3 flex flex-wrap md:mb-4').find_all('div')
            info2 = [ll.text.strip().replace('\n\n', '   ').replace('\t', ' ').replace('\n', '')
                     for ll in lists if ':' in ll.text]
            info_dict = {}
            for item in info2:
                # partition splits on the first ':' only; everything after it
                # (including further colons) stays in the value
                key, _, value = item.partition(':')
                info_dict[key.strip()] = value.strip()
            data['info'] = info_dict
        except Exception as e:
            # page layout varies; log and keep whatever fields we already have
            print(e)
        data['status'] = 'success'
    except Exception as e:
        # Exception, not BaseException: let KeyboardInterrupt/SystemExit
        # propagate so pool workers can be interrupted cleanly.
        print(e)
    hugging_face_datasets.updateOne({'_id': data["_id"]}, data)

# Data repair: some documents stored `info` as a str; re-scrape them so the
# field becomes a proper dict (see get_info_20231031 for the split fix).
def main_20231031():
    """Re-process documents whose ``info`` field is a BSON string, in batches of 500."""
    hugging_face_datasets = mongo_manager('hugging_face_datasets', db='datasets')

    # One-off repair commands kept for reference:
    # hugging_face_datasets.updateMany({"info": {"$type": 2}},{'status':None})
    # hugging_face_datasets.updateMany({"info": ''},{"info": {}})

    while True:
        # $type 2 == BSON string; only documents not yet repaired (status None)
        seeds = hugging_face_datasets.findAll({"info": {"$type": 2},'status':None}).limit(500)
        work_list = list(seeds)
        print(len(work_list))
        # Context manager guarantees the pool is cleaned up even if map() raises
        # (the original leaked the pool on error: close/join were never reached).
        with Pool(processes=5) as pool:
            pool.map(get_info_20231031, work_list)
            pool.close()  # stop accepting new work
            pool.join()
        if len(work_list) < 500:
            # a partial batch means the repair queue is drained
            break
        time.sleep(5)
    hugging_face_datasets.close()


def main():
    """Scrape detail pages for every dataset document still missing ``size``, in batches of 500."""
    hugging_face_datasets = mongo_manager('hugging_face_datasets', db='datasets')

    # One-off reset commands kept for reference:
    # hugging_face_datasets.updateMany({'status':'success'},{'status':None})
    # hugging_face_datasets.updateOne({'status':None},{'status':None})
    while True:
        seeds = hugging_face_datasets.findAll({'size':None}).limit(500)
        work_list = list(seeds)
        print(len(work_list))
        # Context manager guarantees the pool is cleaned up even if map() raises
        # (the original leaked the pool on error: close/join were never reached).
        with Pool(processes=5) as pool:
            pool.map(get_info, work_list)
            pool.close()  # stop accepting new work
            pool.join()
        if len(work_list) < 500:
            # a partial batch means nothing left to process
            break
        time.sleep(5)

    hugging_face_datasets.close()

if __name__ == "__main__":
    # Module-level handle: the pool workers (get_info / get_info_20231031)
    # resolve the global name `hugging_face_datasets`, which fork-based worker
    # processes inherit from here. NOTE(review): main_20231031() opens its own
    # separate local connection; with the 'spawn' start method the workers
    # would hit a NameError instead — confirm the deployment platform.
    hugging_face_datasets = mongo_manager('hugging_face_datasets', db='datasets')

    print("start")
    main_20231031()
    hugging_face_datasets.close()

