#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :examcoo.py
# @Time      :2023/11/23
# @Author    :CL
# @email     :1037654919@qq.com
import re
import time

import requests
from bs4 import BeautifulSoup
import pandas as pd
from retrying import retry

from utils import mongo_manager

# Mongo collections (db 'public_data'): scraped category index and paper rows.
examcoo_index = mongo_manager('examcoo_index',db ='public_data')
examcoo_paper = mongo_manager('examcoo_paper',db ='public_data')

# pigcha -- presumably a local proxy client listening on 15732; only
# get_paper() routes its requests through it (TODO confirm provider).
proxies = {'http':'127.0.0.1:15732',
           'https':'127.0.0.1:15732'}
# Browser-like request headers to avoid trivial bot filtering.
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
}
# Session cookie copied from a logged-in browser session; will expire and
# must be refreshed manually.
cookies = {
    "PHPSESSID": "1l3niapfmmh22h1hgtm9npm3h2"
}


# Public question-bank index page (公共题库中心)
def get_ku(url = 'https://www.examcoo.com/index/ku'):
    """Scrape the top-level (level-1) category links from the index page.

    Args:
        url: Index page to scrape; defaults to the site's "ku" page.

    Returns:
        list[dict]: one dict per anchor with keys 'name', 'href'
        (absolute URL), 'title' (anchor title attribute, or None when
        absent) and 'level' (always 1).
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    print(response.url,response)

    soups = BeautifulSoup(response.text,'lxml')
    datas =soups.find('div',id='main').find('table').find_all('a')
    lists=[]
    for data in datas:
        level =1 # first level of the category tree
        name = data.get_text()
        href = 'https://www.examcoo.com'+data['href']
        # Tag.get returns None for a missing attribute -- replaces the
        # original bare except, which could also mask unrelated errors.
        title = data.get('title')
        lists.append({'name':name,'href':href,'title':title,'level':level})
    return lists
# Second-level breakdown of one topic (细分, e.g. vocational-qualification exams)
def get_detail(url = 'https://www.examcoo.com/index/detail/mid/2'):
    """Scrape the level-2 category links from a topic detail page.

    Each ``div.catSubBox`` on the page holds either one table (topic
    cell and links on the same row) or two tables (a heading table plus
    a table of sub-topic rows).

    Args:
        url: Detail page URL, '.../index/detail/mid/<id>'.

    Returns:
        list[dict]: entries with 'name', 'href' (absolute URL), 'topic',
        'level' (always 2) and, for two-table groups, 'topic_2'.
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    print(response.url,response)
    soups = BeautifulSoup(response.text,'lxml')
    datas =soups.find('div',id='main').find_all('div',class_='catSubBox')
    lists=[]
    for data in datas:
        level = 2 # second level of the category tree
        tables = data.find_all('table')  # hoisted: original re-ran find_all 3x
        if len(tables) == 1:
            tr = data.find('tr')
            topic = tr.find('td').get_text()
            for a in tr.find_all('td')[-1].find_all('a'):
                name = a.get_text()
                href ='https://www.examcoo.com'+ a['href']
                lists.append({'name':name,'href':href,'topic':topic,'level':level})
        elif len(tables) == 2:
            topic = tables[0].find('tr').get_text()
            trs = tables[-1].find_all('tr')
            for tr in trs:
                topic_2 = tr.find('td').get_text()
                for a in tr.find_all('td')[-1].find_all('a'):
                    name = a.get_text()
                    href ='https://www.examcoo.com'+ a['href']
                    lists.append({'name':name,'href':href,'topic':topic,'level':level,'topic_2':topic_2})
    return lists


# Data repair (数据修复)
def data_xiufu():
    """Re-scrape three specific category rows from the index page and
    upsert their level-2 entries into ``examcoo_index``.

    One-off repair pass for rows that were ingested badly.
    """
    response = requests.get('https://www.examcoo.com/index/ku', headers=headers, cookies=cookies)
    print(response.url,response)

    soups = BeautifulSoup(response.text,'lxml')
    rows = soups.find('div',id='main').find('table').find_all('tr')
    records = []
    for row in rows:
        topic = row.find('td').get_text()
        # Only the three categories that need repairing.
        if topic not in ['公务员类','趣味测试类','企事业内部考试类']:
            continue
        anchors = row.find_all('td')[-1].find_all('a')
        records.extend(
            {
                'name': a.get_text(),
                'href': 'https://www.examcoo.com' + a['href'],
                'topic': topic,
                'level': 2,
            }
            for a in anchors
        )
    for record in records:
        record["_id"] = record['href']
        print(record)
        try:
            # Insert first; on duplicate _id fall back to an update.
            examcoo_index.insertOne(record)
        except Exception:
            examcoo_index.updateOne({"_id": record["_id"]}, record)
@retry(stop_max_attempt_number=3)
def get_paper(url ='https://www.examcoo.com/paperlist/index/k/427/p/2'):
    """Scrape one page of a paper listing into row dicts.

    Parses the listing table with pandas, then recovers each row's
    "查阅" (view) link with a regex, since read_html drops anchors.
    Retries up to 3 times on any exception (network flakiness).

    Args:
        url: Paper-list page, '.../paperlist/index/k/<kid>/p/<page>'.

    Returns:
        list[dict]: one dict per table row; all values stringified, plus
        an absolute 'href' (site root when the row had no view link).
    """
    from io import StringIO  # stdlib; only needed in this function

    response = requests.get(url, headers=headers, cookies=cookies,proxies=proxies)
    print(response.url,response)
    soups = BeautifulSoup(response.text, "lxml")
    h1 = soups.find('div',class_ ='main-box')
    contents = h1.select('table')[0]  # select() returns a list; [0] yields the Tag
    # Wrap in StringIO: passing a literal HTML string to read_html is
    # deprecated in modern pandas.
    tbl = pd.read_html(StringIO(contents.prettify()), header=0)[0]
    # Hoisted out of the loop: the pattern is loop-invariant.
    link_re = re.compile(r'<a[^>]*?href="([^"]*)"[^>]*?>查阅<\/a>')
    hrefs=[]
    for tr in h1.find('table').find('tbody').find_all('tr'):
        match = link_re.search(str(tr))
        if match:
            hrefs.append('https://www.examcoo.com'+match.group(1))
        else:
            # No anchor in this row -- keep list aligned with the table.
            hrefs.append('https://www.examcoo.com/')
    tbl['href'] = hrefs
    # Drop the duplicated name column, then assign canonical headers.
    tbl = tbl.drop('试卷名称', axis=1)
    tbl.columns =['试卷编号', '试卷名称', '总分', '题数', '时限', '操作', '查阅', '测试', '出卷时间', '录入者',
       'href']
    article_infos = [{j: str(row[j]) for j in tbl.columns} for i, row in tbl.iterrows()]
    return article_infos
def main():
    """Crawl paper listings for every level-2 index entry and upsert
    each paper row into ``examcoo_paper``.

    ``maxpage`` is derived from the digits embedded in seed['topic']
    (presumably the displayed paper count -- TODO confirm), assuming 30
    rows per page, with a fallback cap of 100 pages.  After a seed is
    fully paged it is marked status='success' in ``examcoo_index``.
    """
    print()
    # Earlier pipeline stages (seeding examcoo_index via get_ku and
    # get_detail) are run separately; this function is stage 3 only.

    digit_re = re.compile(r"\d+")  # hoisted: loop-invariant pattern
    seeds = examcoo_index.findAll({'level':2})
    for seed in seeds:
        page=1
        match = digit_re.search(seed['topic'])
        maxpage=100  # fallback cap when the topic text carries no count
        if match:
            # ~30 papers per listing page; integer division replaces the
            # original float round-trip int(int(x)/30).
            maxpage = int(match.group()) // 30 + 1
        while True:
            # e.g. https://www.examcoo.com/paperlist/index/k/43/p/1
            url = str( seed['href']).rsplit('/',1)[0] + '/' + str(page)
            lists = get_paper(url)
            print(seed['topic'],seed['name'],url,len(lists))

            for ll in lists:
                ll["_id"] = ll['href']
                ll["base_url"] = url
                ll["topic"] = seed['topic']
                # 'topic_2' only exists for two-table detail groups; an
                # explicit get() replaces the original bare except, which
                # also swallowed unrelated errors (even KeyboardInterrupt).
                topic_2 = seed.get('topic_2')
                if topic_2 is not None:
                    ll["name"] = seed['topic']+'-'+topic_2+'-'+seed['name']
                else:
                    ll["name"] = seed['topic']+'-'+seed['name']
                try:
                    # Insert first; on duplicate _id fall back to update.
                    examcoo_paper.insertOne(ll)
                except Exception:
                    examcoo_paper.updateOne({"_id":ll["_id"]},ll)
            # A short page means the listing is exhausted.
            if len(lists) < 30 or page > maxpage:
                break
            page += 1
        seed['status'] = 'success'
        examcoo_index.updateOne({"_id":seed["_id"]},seed)
        time.sleep(2)  # be polite between categories





if __name__ == "__main__":
    print('beidi')
    try:
        main()
    finally:
        # Close Mongo handles even when the crawl raises part-way.
        examcoo_index.close()
        examcoo_paper.close()









