import os
import threading
import time
import random
import lxml
import lxml.etree
import requests
import typing
import re
import copy


# Pool of desktop-browser User-Agent strings; one is chosen at random per
# request so the scraper does not present a single fixed identity.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
]


def getInfo() -> dict:
    """Build a request-header dict carrying a randomly picked User-Agent."""
    chosen_agent = random.choice(user_agent_list)
    return {"User-agent": chosen_agent}


def make_dirs(path: str):
    """Create *path* and any missing parent directories.

    Replaces a hand-rolled component-by-component walk that broke on
    absolute POSIX paths: the leading separator produced an empty first
    component, ``os.path.exists("")`` returned False, and an IOError was
    raised even though the path was perfectly creatable.

    Raises:
        OSError: if a directory cannot be created.  ``IOError`` is an
            alias of ``OSError``, so callers catching IOError still work.
    """
    # exist_ok=True keeps the original "skip components that already
    # exist" behaviour and makes repeated calls idempotent.
    os.makedirs(path, exist_ok=True)


def write_file(content, path="./loread.html"):
    """Write *content* to *path* as UTF-8, creating parent directories.

    The original deleted an existing file before rewriting it and only
    checked the parent directory when the file was absent; opening with
    mode "w" already truncates, and the parent directory must exist in
    every case, so both branches collapse into a single makedirs call.
    """
    file_abspath = os.path.abspath(path)
    parent_dir = os.path.dirname(file_abspath)
    # Create the parent directory tree if needed (no-op when present).
    os.makedirs(parent_dir, exist_ok=True)
    with open(file_abspath, "w", encoding="utf-8") as file:
        file.write(content)


class XPathInfo:
    """Bundle of one XPath evaluation: the source HTML, the expression,
    its result list, and the parsed lxml tree it ran against."""

    def __init__(self, dic: dict):
        self.htmlText: str = dic["htmlText"]          # raw HTML that was parsed
        self.xpathParam: str = dic["xpathParam"]      # XPath expression that was evaluated
        self.xpathResult: list = dic["xpathResult"]   # nodes/values the expression matched
        self.xpathData: lxml.etree._Element = dic["xpathData"]  # parsed document tree

    def __str__(self):
        return '[\nself.htmlText =\n{0}\n\t self.xpathParam =\n\t\t{1}\n\t self.xpathResult =\n\t\t{2}]'.format(
            self.htmlText, self.xpathParam, self.xpathResult)

    def xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables) -> typing.List[lxml.etree._Element]:
        """Evaluate *_path* against the stored tree.

        Bug fix: the original hard-coded ``namespaces=None``,
        ``extensions=None`` and ``smart_strings=True`` in the delegated
        call, silently discarding whatever the caller passed; forward the
        actual arguments instead.
        """
        return self.xpathData.xpath(_path, namespaces=namespaces, extensions=extensions,
                                    smart_strings=smart_strings, **_variables)


def user_xpath(htmlText: str, xpathParam: str = ".") -> typing.Dict:
    """Parse *htmlText* into an lxml tree, evaluate *xpathParam* on it,
    and return everything bundled as the dict shape XPathInfo expects."""
    parsed_tree: lxml.etree._Element = lxml.etree.HTML(htmlText)
    matched = parsed_tree.xpath(xpathParam)
    return dict(
        htmlText=htmlText,
        xpathParam=xpathParam,
        xpathResult=matched,
        xpathData=parsed_tree,
    )


class SoXSccInfo:
    """Record for one scraped novel: title, last-update time, blurb and
    absolute page link."""

    def __init__(self, alt_name, update_time, info, link):
        self.altName = alt_name        # novel title
        self.updateTime = update_time  # last-update timestamp string
        self.info = info               # short description paragraph
        self.link = link               # absolute URL of the detail page

    def get_data(self):
        """Render this record as the multi-line text block written to the
        report file."""
        template = "小说名称:\t{0}\n最后更新\t{1}\n简介:\n\t{2}\n链接:\t{3}"
        return template.format(self.altName, self.updateTime, self.info, self.link)


class AnalysisPage(threading.Thread):
    """Worker thread: downloads one soxscc listing page, then visits every
    book linked from it and collects one SoXSccInfo per book into
    ``analysisItems``.

    ``analysis_lock`` serializes console output and guards the small
    progress fields (``_bookLink`` / ``_isOver``) that ``get_print_info``
    may read from another thread while this one is running.
    """

    def __init__(self, url, header) -> None:
        super(AnalysisPage, self).__init__()
        self.timeout = 2  # per-request timeout (seconds) for book detail pages
        self.url = url  # listing-page URL this worker is responsible for
        self.header = header  # HTTP headers (randomized User-Agent) for every request
        self._bookLink = ""  # book URL currently being fetched (progress display)
        self.analysisItems: typing.List[SoXSccInfo] = []  # results, filled by run()
        self.pattern = re.compile(r'[0-9].*[0-9]+')  # extracts the digit span (timestamp) from text
        self.analysis_lock = threading.Lock()
        self._isOver = False  # True once the current book page finished downloading

    def run(self) -> None:
        """Download the listing page (retrying forever on any error), parse
        it, then crawl each linked book page via get_list_items_of_session."""
        session = requests.Session()
        while True:
            try:
                get_response = session.get(self.url, headers=self.header, timeout=5, verify=True)
                break
            except Exception:
                # Any failure (timeout, connection error, ...) logs and
                # retries with a fresh session after a short pause.
                # NOTE(review): retries forever — confirm a cap is not wanted.
                with self.analysis_lock:
                    print("re get link --> " + self.url)
                time.sleep(0.2)
                session = requests.Session()
        # NOTE(review): get_response.encoding can be None when the server
        # sends no charset, which would make decode() raise — confirm the
        # target site always sets one.
        pageHtmlTxt = get_response.content.decode(get_response.encoding)
        xpathDataMap = user_xpath(pageHtmlTxt)
        xpathInfoObj = XPathInfo(xpathDataMap)
        self.analysisItems = self.get_list_items_of_session(get_response, session, xpathInfoObj)

    def get_list_items_of_session(self, get_response: requests.Response, session: requests.Session, xpath_info: XPathInfo) -> typing.List[SoXSccInfo]:
        """Visit every book link found on the listing page and build one
        SoXSccInfo (title, update time, blurb, absolute link) per book.

        ``get_response`` supplies the scheme and host used to absolutize
        the relative hrefs; ``session`` is reused for all follow-up
        requests.
        """
        release, hstr = get_response.url.split("://")
        release = release + "://"  # scheme prefix, e.g. "https://"
        host = hstr.split("/")[0]  # bare host name
        li_label_nodes = xpath_info.xpath('//div[@class="main"]/ul[@class="list"]/li')  # all book entries on this listing page
        result: typing.List[SoXSccInfo] = []
        for liLabelNode in li_label_nodes:
            href = liLabelNode.xpath("./a/@href")[0]
            bookLink = release + host + href  # absolutize the relative href
            with self.analysis_lock:
                self._bookLink = bookLink
                self._isOver = False
            while True:
                try:
                    get_response = session.get(bookLink, headers=self.header, timeout=self.timeout, verify=True)
                    break
                except Exception as ex:
                    # Retry forever on failure, same policy as run().
                    with self.analysis_lock:
                        print("re get link --> " + bookLink)
                    time.sleep(0.2)
            with self.analysis_lock:
                self._isOver = True
            lnikHtmlText = get_response.content.decode(get_response.encoding)
            lnikXpathDataMap = user_xpath(lnikHtmlText)
            lnikXpathInfo = XPathInfo(lnikXpathDataMap)
            mainNodes = lnikXpathInfo.xpath(r'//div[@class="main"]')[0]  # root node of the book page body
            detailNode = mainNodes.xpath(r'./div[@class="detail"]')[0]
            altName = detailNode.xpath(r'./img/@alt')[0]  # book title from the cover image's alt text
            # The last child of the detail block holds the update time; the
            # regex pulls out the digit span ("%Y-%m-%d %H:%M:%S" is assumed
            # downstream when sorting).
            updateTime: typing.AnyStr = self.pattern.findall(detailNode[len(detailNode) - 1].text)[0]
            info = mainNodes.xpath(r'./div[@class="box"]/div[@class="intro"]/p')[0].text
            result.append(SoXSccInfo(altName, updateTime, info, bookLink))
        return result

    def get_print_info(self, var=None) -> str:
        """Return a one-line progress string: thread name, listing URL, the
        book link currently in flight and whether its download finished.
        *var* is an optional extra marker inserted into the line."""
        if var is None:
            with self.analysis_lock:
                return self.name + " run job " + self.url + " --> " + self._bookLink + "[" + str(self._isOver) + "]"
        else:
            with self.analysis_lock:
                return self.name + " run job (" + str(var) + ")" + self.url + " --> " + self._bookLink + "[" + str(
                    self._isOver) + "]"


class AnalysisClass(threading.Thread):
    """Coordinator thread for one whole category: spawns one AnalysisPage
    worker per listing page and waits until all of them are done."""

    def __init__(self, buffUrl, lastNumber, header) -> None:
        super(AnalysisClass, self).__init__()
        self.buffUrl = buffUrl        # category base URL, e.g. ".../xuanhuan/"
        self.lastNumber = lastNumber  # one past the last listing-page number
        self.header = header          # request headers shared by all workers
        self.analysisPageList: typing.List[AnalysisPage] = []  # spawned workers
        self.wait_list = []           # snapshot of workers to join in run()

    def run(self) -> None:
        """Block until every spawned page worker has finished."""
        for worker in self.wait_list:
            worker.join()

    def prepare(self):
        """Start one AnalysisPage thread per listing page (1 .. lastNumber-1)."""
        for page_no in range(1, self.lastNumber):
            page_url = "{0}{1}.html".format(self.buffUrl, page_no)
            worker = AnalysisPage(page_url, self.header)
            worker.start()
            self.analysisPageList.append(worker)
        # Shallow copy so run() iterates a stable snapshot of the workers.
        self.wait_list = copy.copy(self.analysisPageList)


def fetch_list_info_once(last_part):
    """Scrape one soxscc category (*last_part*, e.g. "xuanhuan"): discover
    how many listing pages it has, crawl them all in parallel, then write
    a report file sorted by last-update time (newest first) to
    ./soxsccInfo/<last_part>ListInfo.txt.
    """
    clone_url = "https://m.soxscc.net/"
    buff_url = clone_url[:] + last_part + "/"
    header = getInfo()
    getRequest = requests.get(buff_url, headers=header)
    htmlText = getRequest.content.decode("utf-8")
    xpathDataMap = user_xpath(htmlText)
    xpathInfoObj = XPathInfo(xpathDataMap)

    # Pull the digits out of the second pagination link's href to learn
    # the number of listing pages in this category.
    findLstaPageNode = xpathInfoObj.xpath(r'//body/div[@class="main"]/div[@class="pagelist"]/a')[1]
    make: str = findLstaPageNode.xpath(r'./@href')[0]
    pattern = re.compile(r'[0-9].*[0-9]+')
    make = pattern.findall(make)[0]
    last_number = int(make) + 1

    # Crawl every listing page concurrently and wait for completion.
    soxscc_class: AnalysisClass = AnalysisClass(buff_url, last_number, header)
    soxscc_class.prepare()
    soxscc_class.start()
    soxscc_class.join()
    analysis_result: typing.List[SoXSccInfo] = []
    for page in soxscc_class.analysisPageList:
        for item in page.analysisItems:
            analysis_result.append(item)
    # Sort newest update first.
    list_info = sorted(analysis_result, key=lambda number: int(time.mktime(time.strptime(number.updateTime, "%Y-%m-%d %H:%M:%S"))), reverse=True)

    file_context_arr = []
    for info in list_info:
        # Bug fix: the original f-string read "${info.get_data()}" — the
        # leading "$" (a JavaScript template-literal leftover) was emitted
        # literally into the report; drop it.
        file_context_arr.append(f"==============\n{info.get_data()}\n==============")

    file_path = "./soxsccInfo/" + last_part + "ListInfo.txt"
    print("write file is %s" % file_path)
    write_file('\n'.join(file_context_arr), file_path)


def get_list_infos(append_urls: typing.List[str]):
    """Scrape every category path listed in *append_urls*, one at a time."""
    for category in append_urls:
        fetch_list_info_once(category)


if __name__ == '__main__':
    # Entry point: scrape every category listed below (each string is the
    # URL path segment of one category on m.soxscc.net) and write one
    # report file per category under ./soxsccInfo/.
    get_list_infos(append_urls=[
        "xuanhuan",
        "wuxia",
        "lishi",
        "kehuan",
        "youxi",
        "lingyi",
        "xiaoyuan",
        "dushi",
        "hunlian",
        "zongcai",
        "tongren",
        "chuanyue",
        "xinshu",
    ])
