#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :ac_data.py
# @Time      :2023/11/8 
# @Author    :CL
# @email     :1037654919@qq.com
# 爬取  http://www.data.ac.cn/tabinfo/  数据表
import math
import time
from io import StringIO

import requests
import pandas as pd
from bs4 import BeautifulSoup

from utils import mongo_manager, get_kuai_proxy
# MongoDB collection handle: collection "ac_data" inside database "public_data".
ac_data = mongo_manager("ac_data",db='public_data')
# Browser-like request headers so www.data.ac.cn serves the regular HTML pages
# instead of rejecting the scraper.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "Referer": "http://www.data.ac.cn/list/tab_water",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}
# Fetch the overview page listing every table category and its tables.
def get_tableinfo(url="https://www.data.ac.cn/tabinfo/"):
    """Scrape the table-overview page into one record per table group.

    Each record is a dict with keys:
        'type'      - category heading text
        'type_href' - link of the category heading
        'tab_name'  - table group name
        'tab_num'   - number of tables in the group (the "(N)" suffix)
        'tab_href'  - link to the group's paginated listing

    Returns an empty list when the response is not HTTP 200.
    """
    response = requests.get(url, headers=headers, timeout=10)
    print(response)
    tablesinfo = []
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "lxml")
        for panel in soup.find_all('div', class_='col-sm-4'):
            # Parse the category heading once instead of twice per panel.
            heading = panel.find('div', class_='panel-heading')
            category = heading.get_text().strip()        # renamed: was `type`, shadowed the builtin
            category_href = heading.find('a').get('href')
            for li in panel.find_all('li'):
                parts = li.get_text().strip().split('\n')
                tab_name = parts[0]
                # The trailing line is "(N)"; strip the parentheses.
                tab_num = parts[-1].replace('(', '').replace(')', '')
                tablesinfo.append({
                    'type': category,
                    'type_href': category_href,
                    'tab_name': tab_name,
                    'tab_num': tab_num,
                    'tab_href': li.find('a').get('href'),
                })
    return tablesinfo
# Fetch one paginated listing page for a table category.
def get_table_list(url='https://www.data.ac.cn/list/tab_water'):
    """Download a listing page through the Kuai proxy.

    Returns the raw HTML on HTTP 200, otherwise None.
    """
    resp = requests.get(url, headers=headers, proxies=get_kuai_proxy(), timeout=10)
    print(resp)
    if resp.status_code != 200:
        return None
    return resp.text
# Fetch a single data-table page and parse its HTML table into records.
def get_table_data(url="http://www.data.ac.cn/table/tbc40"):
    """Return the page's first <table> as a list of dicts (one per row).

    Every cell value is stringified so the records store cleanly in Mongo.
    Returns None when the response is not HTTP 200; may raise (e.g.
    IndexError when the page has no <table>) — callers wrap this in
    try/except.
    """
    # NOTE(review): verify=False disables TLS verification — presumably the
    # site's certificate is broken; insecure, keep only if unavoidable.
    response = requests.get(url, headers=headers, proxies=get_kuai_proxy(),
                            timeout=10, verify=False)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "lxml")
        table = soup.select('table')[0]  # first table element on the page
        # pandas >= 2.1 deprecates passing literal HTML to read_html;
        # wrap the markup in StringIO as the docs recommend.
        tbl = pd.read_html(StringIO(table.prettify()), header=0)[0]
        # Equivalent to the row-by-row str() loop, but vectorized.
        return tbl.astype(str).to_dict(orient='records')


if __name__ == "__main__":
    # Crawl every table group, page through its listings, scrape each table
    # and upsert the result into the "ac_data" Mongo collection.
    tableinfo = get_tableinfo()
    for tab in tableinfo:
        num = int(tab['tab_num'])
        print(tab['type'])
        # Listing pages show 10 entries each: /list/<tab>/<page>?tag=
        for page in range(1, math.ceil(num / 10) + 1):
            url = 'https://www.data.ac.cn' + tab['tab_href'] + '/' + str(page) + '?tag='
            print(url)  # e.g. https://www.data.ac.cn/list/tab_water/2?tag=
            res = get_table_list(url)
            if not res:
                # Request failed or non-200: skip this page instead of
                # crashing in BeautifulSoup(None, ...).
                continue
            soups = BeautifulSoup(res, 'lxml')
            for li in soups.find('ul', class_='list-group').find_all('li'):
                table_href = 'http://www.data.ac.cn' + li.find('a').get('href')
                table_info = {
                    '_id': table_href,  # the table URL doubles as the unique key
                    'type': tab['type'],
                    'type2': tab['tab_name'],
                    'table_href': table_href,
                    'table_name': li.find('a').get_text(),
                    'table_time': li.find('span').get_text(),
                }
                try:
                    table_info['data'] = get_table_data(table_href)
                except Exception as e:
                    # Was `except BaseException`: narrowed so Ctrl-C still
                    # interrupts the crawl. Best-effort: record without data.
                    print(e)
                try:
                    ac_data.insertOne(table_info)
                except Exception:
                    # Was a bare `except:`; duplicate _id -> update instead.
                    ac_data.updateOne({"_id": table_info["_id"]}, table_info)
            time.sleep(5)  # be polite between listing pages
    ac_data.close()





