import re
import time
from urllib import parse
import arrow
import xlrd
from loguru import logger
from selenium.webdriver.common.by import By
from config import dis_second, dis_first, dis_fourth, dis_third, dis_index, dis_need, dis_mid, dis_special
from my_selenium import selenium_get, get_chrome_driver
from func import page_point_upload, is_number, format_data, SQLServer, str_sub, is_same, get_xpath_root_path


class MSpider:
    """Breadth-first menu-link crawler for (Chinese) government portal sites.

    Drives a single shared Selenium Chrome instance, collects ``<a>`` links
    whose text and href pass the config-driven ``dis_*`` filters, and
    finally uploads the accumulated ``[title, url]`` pairs.

    NOTE(review): ``driver`` is created in the class body, so it starts as a
    side effect of class definition and is shared by every instance.
    """

    driver = get_chrome_driver()
    driver.set_page_load_timeout(10)  # hard cap on page loads (seconds)
    driver.implicitly_wait(15)        # implicit wait for element lookups
    driver.set_script_timeout(10)
    time.sleep(1)

    def __init__(self, level):
        # All [title, url] pairs collected so far, across every depth.
        self.pages = []
        # Current crawl depth; the seed page is depth 1.
        self.tab = 1
        # Maximum depth to crawl down to.
        self.level = level

    def selenium_driver(self, href):
        """Navigate the shared driver to *href*.

        Returns ``(page_source, anchor_elements, current_url)``, or
        ``(None, None, None)`` when even the anchor lookup fails.
        """
        try:
            self.driver.get(href)
        except:  # on load error/timeout, force-stop loading via JS
            self.driver.execute_script('window.stop()')
        time.sleep(2)  # give late-running JS a moment to render links
        try:
            tags = self.driver.find_elements(By.XPATH, '//a')
        except:
            return None, None, None
        current_url = self.driver.current_url
        page_content = self.driver.page_source
        return page_content, tags, current_url

    def get_tag_text(self, tag):
        """Extract a display title for an ``<a>`` element.

        Tries ``title`` / ``aria-label`` / ``text`` attribute / visible text,
        in that order; returns ``None`` when nothing usable is found.
        All whitespace is stripped from the result.
        """
        # "$[" in the title marks an unrendered client-side template value.
        if tag.get_attribute("title") and "$[" not in tag.get_attribute("title"):
            title = tag.get_attribute('title')
        elif tag.get_attribute("aria-label"):
            title = tag.get_attribute("aria-label")
        elif tag.get_attribute('text'):
            title = tag.get_attribute('text')
        elif tag.text:
            title = tag.text.strip().replace("\n", "").replace("\r\n", '').replace('\t', '').replace("\r", '')
        else:
            title = None
        if title:
            title = ''.join(title.split())  # drop every remaining whitespace char
        return title

    def title_check(self, title):
        """Return True when *title* should be filtered out (skipped).

        The ``dis_*`` values come from config; presumably lists of
        blacklist / required-keyword strings — TODO confirm against config.
        """
        title_short = title
        for astr in dis_special:
            title_short = title_short.replace(astr, '')
        result = False
        if self.tab < 3 and any(x == title[-len(x):] for x in dis_third):  # at shallow depth, skip titles ending with any dis_third suffix
            result = True
        elif is_number(title):
            result = True
        elif any(x in title for x in dis_second) or any(x == title for x in dis_first):
            result = True
        elif all(x not in title for x in dis_need):
            result = True
        elif len(title_short) > 10:                   # length check deliberately kept last
            result = True
        else:
            result = False
        return result

    def get_tag_href(self, tag):
        """Resolve the target URL of an ``<a>`` element.

        Looks at href-like attributes first, then at ``onclick`` handlers
        (``.href='...'`` / ``window.open('...')``).  Returns the unquoted
        URL, or ``None`` for javascript:/'#'-only pseudo links.
        """
        index = None
        for tag_name in ['href', 'data-url', 'data-href', 'data-uri', 'data-showhref']:
            if tag.get_attribute(tag_name) and all(x not in tag.get_attribute(tag_name).lower()
                                                   for x in ["iframe", "javascript"]):
                index = tag.get_attribute(tag_name)
                break
        if not index:
            if tag.get_attribute('onclick'):
                tag_obj = tag.get_attribute('onclick')
                action = re.findall(r"\.href='(.*?)'", str(tag_obj), re.S)
                if not action:
                    action = re.findall(r"window.open\('(.*?)'", str(tag_obj), re.S)
                if action:
                    index = action[0]
        if not index or 'javascript' in index or all(x == '#' for x in index):  # not a real web link
            return None
        if 'javascript:link(' in index:
            index = re.findall(r'''javascript:link\('(.*?)\)''', tag.get_attribute('href'))
            if index:
                index = index[0]
        index = parse.unquote(index)
        if 'http' not in index and index[:2] == '//':
            index = f'http:{index}'  # protocol-relative URL: default to http
        return index

    def url_check(self, url, host, init_url):
        """Return True when *url* should be filtered out (skipped).

        All comparisons normalise https to http first.  A ``False`` return
        means the link survived every filter and will be crawled.
        """
        url = url.replace("https", "http")
        host = host.replace("https", "http")
        init_url = init_url.replace("https", "http")
        city_host = re.findall(r'''http.*?(?:\w+\.)?(\w+)(?:\.gov)?\.cn''', host)[0]  # assumed to always match; raises IndexError otherwise
        if not url:
            return True
        elif any(x in url for x in dis_mid):
            return True
        elif any(x == url[-len(x):] for x in dis_index) or is_same(init_url, url) or is_same(host, url):  # drop links that are just the site homepage
            return True
        elif any(is_same(x[1].replace("https", "http"), url) for x in self.pages):  # an "identical" URL was already collected
            return True
        elif any(x in url for x in dis_fourth):
            return True
        elif re.findall(r'''/t?20\d{2}[-/.]?\d{2}[-/.]?(?:\d{2})?/?''', url):
            # URL embeds a /t?YYYYMMDD-style date path: treat it as an article page.
            resp = max(re.findall(r'''/t?20\d{2}[-/.]?(?:\d{2}[-/.]?(?:\d{2})?)?''', url))
            resp = re.findall(r'''\d+''', resp.replace('/', ''))[0]
            year, month, day = resp[:4], resp[4:6], resp[6:]
            # year, month, day = re.findall(r'''/.?20(\d{2})[-/.]?(\d{2})[-/.]?(?:(\d{2}))?[/_]?''', url)[0]
            # NOTE(review): ``2000 > year > current_year`` can never be true, so
            # this branch never fires; probably meant "year outside
            # 2000..current year" — confirm intended behaviour before fixing.
            if year and 2000 > int(year) > int(arrow.now().strftime("%Y")):
                return True
            if month and int(month) <= 12:
                return True
            if day and int(day) <= 31:
                return True
        elif url == 'http://www.ycs.gov.cn/xzjdxxgk':
            return True  # hard-coded site-specific exclusion
        # elif all(x.replace("https", "http") not in url for x in [init_url, f'xxgk.{city_host}', 'info.xingtai.gov.cn', 'info.hebei.gov.cn']):
        elif all(x.replace("https", "http") not in url for x in [host, f'xxgk.{city_host}', 'info.xingtai.gov.cn', 'info.hebei.gov.cn']):
            # Final URL is not under this site's host (or a known xxgk./info.*
            # open-information platform domain) — drop it.
            # info.***.gov.cn: special domains of the open-information platform.
            if "://public." in url:
                if f"public.{city_host}.gov.cn" not in url:
                    return True
            else:
                return True
        else:
            return False
        return False        # anything not filtered above is wanted (False)

    def spider(self, url, host, init_url):
        """Crawl one page and return the ``[title, url]`` pairs of new links.

        Every accepted pair is also appended to ``self.pages``.
        """
        page_list = []
        content, tags, current_url = self.selenium_driver(url)
        if not content:
            return []
        if '403 Forbidden' in content:
            # Forget the stored page that led here, then bail out.
            for i in range(len(self.pages)):
                if self.pages[i][1] == url:
                    self.pages.remove(self.pages[i])
                    return []
        if current_url.replace("https", "http") != url.replace("https", "http"):
            if str_sub(url, current_url) not in ["/", "#", '']:
                # e.g. .../zcwj and .../zcwj/ are the same page — only reload on a real redirect
                url = current_url
                logger.debug(f"更新新链接： {url}")
                content, tags, current_url = self.selenium_driver(url)
        if "data:," == current_url:
            content, tags, current_url = self.selenium_driver(url)  # blank tab: retry once
        for tag in tags:
            try:
                title = self.get_tag_text(tag)
                if not title:
                    continue
                # TODO special cases (title)
                if self.title_check(title):
                    continue
                menu_url = self.get_tag_href(tag)
                if not menu_url:
                    continue
                # TODO special cases (URL)
                if self.url_check(menu_url, host, init_url):
                    continue
            except Exception as err:
                logger.error(err)
                continue
            logger.info(f"{title}   {menu_url}")
            page_list.append([title, menu_url])
            self.pages.append([title, menu_url])  # self.pages.remove(x for x in self.pages if x[1] == 'http://www.gdqy.gov.cn/qyjyj/gkmlpt/index#9339')
        return page_list

    def main(self, org_id, unit_name, unit_url):
        """Breadth-first crawl from *unit_url* down to depth ``self.level``,
        then format and upload everything collected in ``self.pages``.
        """
        org_id = int(org_id)
        init_host = re.findall(r"(http.*?\.(?:cn|com|org|net))/?", unit_url, re.S)[0]
        # init_url: unit_url with its last path segment dropped (directory URL).
        init_url = re.findall(r"(http.*/?)", '/'.join(x for x in unit_url.split("/")[:-1]) + '/')[0]
        page_list = [[unit_name, unit_url]]
        while True:
            cache = []
            for title, url in page_list:
                if self.tab != 1:
                    logger.warning(f"{self.tab}级标签   {title}   {url}")
                cache_page_list = self.spider(url, init_host, init_url)
                cache.extend(cache_page_list)
            self.tab += 1
            if not cache or self.tab > self.level:
                break
            page_list = cache
            logger.debug(f"{self.tab}级标签共: {len(page_list)}")
            time.sleep(3)  # be polite between depth levels
        data = format_data(org_id, self.pages)
        print(len(data))
        page_point_upload(data)


def crawl_xlsx(filepath, index=None, level=4):
    """Crawl every (org_id, unit_name, unit_url) row of an Excel workbook.

    The first sheet is read; each row spawns a fresh ``MSpider``.

    :param filepath: path of the workbook readable by ``xlrd``.
    :param index: 0-based row to resume from; ``None`` means the first row.
        (Previously the ``None`` default crashed with ``TypeError`` inside
        ``range(None, ...)``.)
    :param level: maximum crawl depth handed to ``MSpider``.
    """
    start_row = 0 if index is None else index
    workbook = xlrd.open_workbook(filepath)
    worksheet = workbook.sheet_by_index(0)
    for read_row in range(start_row, worksheet.nrows):
        org_id, unit_name, unit_url = worksheet.row_values(read_row)
        logger.warning(f"{read_row}   {org_id}   {unit_name}   {unit_url}")
        spider = MSpider(level)
        spider.main(org_id, unit_name, unit_url)


def crawl(sql, index=None, level=4):
    """Crawl every unit returned by *sql* from the SQLServer source.

    Each result row supplies ``org_id`` (col 0), ``unit_name`` (col 2) and
    the host URL (col 13); each row spawns a fresh ``MSpider``.

    :param sql: query handed to ``SQLServer().query``.
    :param index: optional offset into the result set to resume from
        (also added to the logged row number).
    :param level: maximum crawl depth handed to ``MSpider``.
    """
    sql_result = SQLServer().query(sql)  # query once, slice afterwards
    if index:
        sql_result = sql_result[index:]
    else:
        index = 0
    for num, data in enumerate(sql_result):
        org_id = data[0]
        unit_name = data[2]
        host_url = data[13].strip()
        # Append a trailing slash to bare directory-style URLs.  endswith()
        # also tolerates an empty cell, where the old ``host_url[-1]``
        # indexing raised IndexError.
        if not host_url.endswith('/') and all(x not in host_url for x in ['.jsp', '.htm', '.html', '.php', 'shtml']):
            host_url += '/'
        logger.warning(f"{num + index}   {org_id}   {unit_name}   {host_url}")
        spider = MSpider(level)
        spider.main(org_id, unit_name, host_url)
