import random
import re
import time
import requests
import xlrd
from bs4 import BeautifulSoup
from loguru import logger
from retrying import retry
from config import headers, dis_second, dis_first, dis_fourth, dis_third, dis_index
from my_selenium import selenium_get
from func import page_point_upload, is_number, format_data


def index_to_URL(url, url_re, index):
    """Resolve a (possibly relative) link *index* found on page *url* into
    an absolute URL.

    Args:
        url:    the page the link was found on.
        url_re: the page's base directory URL, ending in '/' (see set_url).
        index:  the raw href value; newlines and spaces are stripped first.

    Returns:
        The absolute URL string.

    NOTE(review): site-root ('/x') and multi-segment ('a/b') links resolve
    against the module-level global ``host`` -- confirm it is set before
    calling this from outside the __main__ loop.
    """
    index = index.replace("\n", "").replace(" ", "")
    if index.startswith("www"):
        # scheme-less host: add a scheme
        URL = 'http://' + index
    elif index.startswith("//www."):
        # protocol-relative URL: add the scheme only
        URL = 'http:' + index
    elif index.startswith('../'):
        # climb one directory per '../' segment, then append the remainder
        num = len(re.findall(r"\.\./", index)) + 1
        new_url = '/'.join(x for x in url_re.split('/')[:-num]) + '/'
        URL = new_url + index.replace("../", '')
    elif index.startswith('./'):
        URL = url_re + index[2:]
    elif index.startswith('/'):
        # site-root relative
        URL = host + '/' + index[1:]
    elif index.startswith('?'):
        # Query-only link: append to the current page URL.
        # Bug fix: this test used to come AFTER the generic relative-path
        # branch and was unreachable ('?...' matched that branch first).
        URL = url + index
    elif not index.startswith('http'):
        if '/' not in index:
            # bare filename: sibling of the current page
            URL = '/'.join(url.split("/")[:-1]) + '/' + index
        else:
            # multi-segment relative path: resolve against the site root
            URL = host + '/' + index
    else:
        # already absolute (http/https)
        URL = index
    return URL


def set_url(url):
    """Return the base directory URL of *url*, always ending with '/'.

    For a page URL (one containing '.html' or a query string) the last
    path component is dropped; otherwise the URL itself is kept, with a
    trailing '/' appended when missing.

    Bug fix: the original condition ``'.html' or '?' in url`` was always
    truthy (non-empty string literal), so the else branch was dead code.
    """
    if '.html' in url or '?' in url:
        url_re = '/'.join(url.split('/')[:-1]) + '/'
    else:
        url_re = url if url[-1] == '/' else url + '/'
    return url_re


@retry(stop_max_attempt_number=3, wait_fixed=5 * 1000)
def http_get(url):
    """GET *url* with the shared config headers, retrying up to 3 times
    with a 5-second pause between attempts.

    A timeout is set so a hung connection fails fast and triggers a retry:
    requests has NO default timeout, so without it this call could block
    forever and the retry decorator would never fire.

    Returns:
        requests.Response
    """
    response = requests.get(url, headers=headers, timeout=30)
    return response


def spider(tag_name, url, host):
    """Collect [title, URL] pairs from the catalog file published by *url*.

    Loads the page via selenium to extract the numeric site id, downloads
    the site's ``<siteId>.js`` catalog, and returns entries on the same
    host whose titles survive the dis_first / dis_second filters.

    Args:
        tag_name: not used inside this function -- presumably kept for the
                  caller's signature; TODO confirm.
        url:  the page to scrape.
        host: scheme + domain (assumes no trailing slash, since it is
              joined as f"{host}/..." below) used both to absolutize
              relative URIs and to drop entries on other domains.

    Returns:
        list of [title, absolute URL] pairs.
    """
    resp, _ = selenium_get(url)
    # the page source embeds its numeric site id as "siteId:<digits>";
    # raises IndexError if the marker is absent
    siteId = re.findall(r'''siteId:(\d+)''', resp, re.S)[0]
    js_url = f"{host}/{siteId}.js"
    resp_json = http_get(js_url).content.decode()
    # The .js payload is "var catalogs = {...}" with JS literals; strip the
    # prefix and map false/true to None so it parses as a Python literal.
    json_str = resp_json.replace("var catalogs = ", '').replace("false", 'None').replace("true", "None")
    # SECURITY: eval() on downloaded content executes arbitrary code if the
    # remote server is compromised -- consider ast.literal_eval (or proper
    # JSON parsing) instead.
    resp_data = eval(json_str)
    pages = []
    for k, data_list in resp_data.items():
        for dl in data_list:
            title = dl['name']
            # relative URI -> prepend host.  (The 'https' test is redundant
            # since 'http' is a substring of 'https'.)
            if all(x not in dl['uri'] for x in ['http', 'https']):
                URL = f"{host}{dl['uri']}"
            else:
                URL = dl['uri']
            # extract the URL's scheme+domain; skip entries we can't parse
            URL_host_re = re.findall(r"(http.*?(?:cn|com|org|net))/", URL, re.S)
            if not URL_host_re:
                continue
            URL_host = URL_host_re[0]
            # keep only same-host links, ignoring http/https difference
            if URL_host.replace("https", 'http') != host.replace("https", 'http'):
                continue
            # title must contain none of dis_second and equal none of dis_first
            if all(x not in title for x in dis_second) and all(x != title for x in dis_first):
                pages.append([title, URL])
    return pages


if __name__ == '__main__':
    # Source spreadsheet, one organisation per row:
    # col 0 = org id, col 1 = org name, col 2 = site URL.
    workbook = xlrd.open_workbook('江苏省级.xlsx')
    worksheet = workbook.sheet_by_index(0)
    for read_row in range(worksheet.nrows):
        row_data = worksheet.row_values(read_row)
        host_url = row_data[2]
        # Extract scheme+domain.  Skip rows whose URL doesn't match (e.g. a
        # header row) instead of crashing with IndexError on [0].
        host_match = re.findall(r"(http.*?(?:cn|com|org|net))/", host_url, re.S)
        if not host_match:
            logger.error(f"row {read_row}: cannot parse host from {host_url!r}, skipping")
            continue
        host = host_match[0]
        org_id = int(row_data[0])
        logger.warning(f"{org_id}   {row_data[1]}   {host_url}")
        page_list = spider(row_data[1], host_url, host)

        data = format_data(org_id, page_list)
        print(len(data))
        page_point_upload(data)

