import re
import time
import tkinter as tk
from tkinter import ttk
import os
import tkinter.messagebox
import threading
import requests
from lxml import etree
import multiprocessing
import sys
import logging
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import logging.handlers
import datetime
from urllib.parse import urlencode
import novel_source_data as nsd

# Ensure the directory for the daily log files exists; makedirs with
# exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
os.makedirs("log", exist_ok=True)


# Logging helpers: one daily rotating log file under log/.
_LOG_DIR = 'log'


def _get_logger():
    """Return the module logger, configuring its file handler only once.

    The original implementation attached a fresh TimedRotatingFileHandler
    on every call and then cleared ``logger.handlers`` without closing the
    handler, leaking one open file handle per message and repeating the
    setup work each time.  Configuring lazily on first use fixes both.
    """
    logger = logging.getLogger(__name__)
    if not logger.handlers:
        # Be robust even if the log directory was removed after startup.
        os.makedirs(_LOG_DIR, exist_ok=True)
        logger.setLevel(logging.INFO)
        # Log record format.
        fmt = "%(asctime)s %(levelname)s [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s"
        formatter = logging.Formatter(fmt)

        to_day = datetime.datetime.now()
        log_file_path = os.path.join(
            _LOG_DIR, 'novel_down_{}_{}_{}.log'.format(to_day.year, to_day.month, to_day.day))

        # Write to a file, rotated once per day at midnight.
        fh = logging.handlers.TimedRotatingFileHandler(log_file_path, when='MIDNIGHT', interval=1, backupCount=0,
                                                       encoding="utf-8")
        fh.setFormatter(formatter)
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
    return logger


def log(msg):
    """Write *msg* as an INFO record to the daily log file."""
    _get_logger().info(msg)


# # 设置修改默认的输入日志格式
# fmt = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s'
# # 设置日志保存到文件中
# logging.basicConfig(level=logging.WARNING, format=fmt, filename="./log/novel_download_log.log")


class MainWindows(tk.Tk):
    """Main application window.

    Provides a search tab (pick a novel source, search by name,
    double-click a result to queue a download) and a download tab (a
    treeview with one row per download job).  Talks to the downloader
    process through two multiprocessing queues.
    """

    def __init__(self, download_quequ, result_quequ):
        super().__init__()  # initialize the tk.Tk base class
        self.title("爬虫")
        self.ini_ui()  # build and place all widgets

        self.download_quequ = download_quequ  # queue of novels to download (producer side)
        self.result_quequ = result_quequ  # queue of status/finish messages (consumer side)

        self.novel_status()  # start the thread that watches the result queue
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36'}

    # Name of the currently selected novel source (a key of nsd.novel_source).
    novel_source = ''

    # Build the widget hierarchy.
    def ini_ui(self):
        self.geometry("{}x{}+600+200".format(500, 600))
        self.resizable(width=False, height=False)  # fixed-size window

        self.tab = ttk.Notebook(self)
        self.tab.place(relwidth=0.887, relheight=0.876)

        self.Frame1_ui()
        self.Frame2_ui()

        # Intercept the window-close button so the workers can be shut down.
        self.protocol("WM_DELETE_WINDOW", self.close)

    # Search tab: source selector, name entry, result list, message box.
    def Frame1_ui(self):
        self.tab1_frame = ttk.Frame(self.tab)

        self.lab1 = tk.Label(self.tab1_frame, text='小说源：')
        self.lab1.place(x=100, y=30)

        self.lab2 = tk.Label(self.tab1_frame, text='小说名称：')
        self.lab2.place(x=100, y=70)

        self.lab3 = tk.Label(self.tab1_frame, text='搜索结果：')
        self.lab3.place(x=100, y=110)

        self.lab4 = tk.Label(self.tab1_frame, text='提示信息：')
        self.lab4.place(x=100, y=310)

        # Drop-down for choosing the novel source.
        self.cbl = ttk.Combobox(self.tab1_frame, width=22)

        self.cbl["values"] = tuple([x for x in nsd.novel_source.keys()])

        self.cbl.current(0)  # pre-select the first source
        self.cbl.place(x=160, y=30)

        # Entry for the novel name to search for.
        self.entry1 = tk.Entry(self.tab1_frame, borderwidth=3, width=24)
        self.entry1.place(x=160, y=70)

        # Search button.
        self.btn1 = tk.Button(self.tab1_frame, text='搜索')
        self.btn1.place(x=350, y=67)
        self.btn1.config(command=self.btn_click)

        # Listbox with the search results; double-click queues a download.
        self.lb = tk.Listbox(self.tab1_frame, width=25)
        self.lb.place(x=160, y=115)
        self.lb.bind('<Double-Button-1>', self.lb_double_click)

        vbar1 = tk.Scrollbar(self.tab1_frame, orient=tk.VERTICAL, command=self.lb.yview)
        vbar1.place(x=338, y=115, height=183)
        self.lb.configure(yscrollcommand=vbar1.set)

        # Read-only text box showing download progress messages.
        self.tx = tk.Text(self.tab1_frame, width=25, height=15)
        self.tx.place(x=160, y=320)
        self.tx.configure(state='disabled')

        vbar2 = tk.Scrollbar(self.tab1_frame, orient=tk.VERTICAL, command=self.tx.yview)
        vbar2.place(x=338, y=320, height=195)
        self.tx.configure(yscrollcommand=vbar2.set)

        self.tab.add(self.tab1_frame, text='小说')  # add the frame as a notebook tab

    # Download tab: a treeview listing one row per download job.
    def Frame2_ui(self):
        self.tab2_frame = ttk.Frame(self.tab)
        self.tab.add(self.tab2_frame, text='下载')  # add the frame as a notebook tab

        self.tree = ttk.Treeview(self.tab2_frame, show="headings")
        self.tree["columns"] = ("名称", "状态", "start", "end", "size", "操作")  # define the columns
        self.tree.column("名称", width=100)  # set column widths
        self.tree.column("状态", width=100)
        self.tree.column("start", width=100)
        self.tree.column("end", width=100)
        # NOTE(review): no width is set for the "size" column — confirm the
        # default width is intended.
        self.tree.column("操作", width=100)

        self.tree.heading("名称", text="名称")  # set the visible header texts
        self.tree.heading("状态", text="状态")
        self.tree.heading("start", text="start")
        self.tree.heading("end", text="end")
        self.tree.heading("size", text="size")
        self.tree.heading("操作", text="操作")
        self.tree.pack()

    # Double-click on a search result: queue that novel for download.
    def lb_double_click(self, event):
        size = self.lb.size()  # any results in the listbox at all?
        if not bool(size):
            return
        name = self.lb.get(self.lb.curselection())  # the selected listbox value

        # Push the chosen novel onto the download queue.
        startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        # NOTE(review): insert index is -1; "end" is the conventional value —
        # confirm the resulting row order is intended.
        id = self.tree.insert("", -1, values=(name, "下载中", startTime, "", "", ""))
        novel_data = {"type": '', "novel_name": name, "novel_url": self.search_results[name],
                      "novel_source": self.novel_source,
                      "novel_tree_id": id, "startTime": startTime}
        print(novel_data)
        self.download_quequ.put(novel_data)  # (novel name, novel url, novel source)

    # Search button handler.
    def btn_click(self):
        print('开始搜索.....')
        log('开始搜索...')
        self.lb.delete(0, tk.END)  # clear the previous results
        self.search_results = {}
        self.novel_source = self.cbl.get()  # selected novel source
        novel = self.entry1.get()  # novel name typed by the user

        # No name entered: show a message box and stop here.
        if not novel:
            tkinter.messagebox.showinfo(title='提示', message='请输入小说名称')
            return

        self.search_novel(novel)

    # -----------------novel search----------------------------------
    # Parse one result page and display its novels.
    def par_page(self, url=False, res=None):
        """Parse a search-result page and append its novels to the UI.

        Either *url* (fetched here) or an already-fetched *res* response
        is used; the selected source's xpaths/regexes from
        nsd.novel_source drive the extraction.
        """
        novel_list_name_xpath = nsd.novel_source[self.novel_source]['novel_list_name_xpath']  # xpath for all novel names
        novel_list_url_xpath = nsd.novel_source[self.novel_source]['novel_list_url_xpath']  # xpath for all novel urls
        dispose_name = nsd.novel_source[self.novel_source]['dispose_name']  # optional regex to clean up names
        base_url = nsd.novel_source[self.novel_source]['base_url']  # optional prefix for relative novel urls
        dispose_url = nsd.novel_source[self.novel_source]['dispose_url']
        other_str = nsd.novel_source[self.novel_source]['other_str']
        html_coding = nsd.novel_source[self.novel_source]['html_coding']

        # Called with a url: fetch the page ourselves.
        if url:
            res = requests.get(url=url, headers=self.header, timeout=1)

        res.encoding = html_coding
        html = res.text
        par_html = etree.HTML(html)
        # All novel names on the page.
        names = par_html.xpath(novel_list_name_xpath)
        # All novel urls on the page.
        novel_urls = par_html.xpath(novel_list_url_xpath)

        # Optionally post-process the names with a regex.
        if dispose_name:
            names = [re.findall(dispose_name, s)[0] for s in names]

        if dispose_url:
            novel_urls = [re.findall(dispose_url, s)[0] for s in novel_urls]

        # Optionally turn relative urls into absolute ones.
        if base_url:
            novel_urls = [base_url + x + other_str for x in novel_urls]

        # Collect name -> url for lookup on double-click.
        search_result = dict(zip(names, novel_urls))
        print(search_result)
        self.search_results.update(search_result)
        # Show the names in the listbox.
        for item in search_result:
            self.lb.insert("end", item)

    def search_novel(self, name):
        """Search the selected source for *name* and display every result page."""
        # Request description for the selected source.
        re_method = nsd.novel_source[self.novel_source]['re_method']  # HTTP method
        re_url = nsd.novel_source[self.novel_source]['re_url']  # request url (format template)
        re_field = nsd.novel_source[self.novel_source]['re_field']  # query/form field name
        isPaging = nsd.novel_source[self.novel_source]['isPaging']
        isByte = nsd.novel_source[self.novel_source]['isByte']
        html_coding = nsd.novel_source[self.novel_source]['html_coding']

        if isByte:
            # Convert the utf-8 text to gbk bytes: some sites are gbk and
            # return no results for utf-8 encoded queries.
            name = bytes(name, 'gbk')

        # Dispatch on the request method.
        if re_method == 'get':
            name = urlencode({re_field: name})
            url = re_url.format(name, 1)
            res = requests.get(url=url, headers=self.header, timeout=3)
        else:
            url = re_url
            data = {re_field: name}
            res = requests.post(url=url, headers=self.header, data=data)

        # Parse and display the first result page.
        self.par_page(False, res)

        # Use the first page to find out whether the results are paginated.
        if isPaging:
            # Paginated: fetch every further page in its own thread.
            res.encoding = html_coding  # page encoding
            html = res.text
            par_html = etree.HTML(html)

            s = par_html.xpath(isPaging['page_xpath'])
            if s:
                sum_code = re.findall(isPaging['code_reg'], s[0])  # total page count
                if sum_code:
                    for x in range(2, int(sum_code[0]) + 1):
                        url = re_url.format(name, x)
                        t = threading.Thread(target=self.par_page, args=(url,))
                        t.start()
            else:
                # No pagination info found on the page.
                print('暂无搜索结果')

    # ----------------------------------------------------------

    # Start the thread that watches the result queue for status updates.
    def novel_status(self):
        self.thread = threading.Thread(target=self.isfinish)
        self.thread.start()

    # Consume the result queue and reflect download progress in the UI.
    def isfinish(self):
        # NOTE(review): this loop updates tkinter widgets from a worker
        # thread; tkinter is generally not thread-safe — confirm this is
        # acceptable on the targeted platform.
        while True:
            msg_data = self.result_quequ.get()
            if msg_data['type'] == 'false':
                # Sentinel pushed by close(): stop the watcher thread.
                break
            msg = '{}-{}-{}'.format(msg_data['novel_name'], msg_data['novel_source'], msg_data['msg'])

            self.tx.configure(state='normal')
            self.tx.insert(tk.END, msg + '\n')
            self.tx.configure(state='disabled')

            if msg_data['type'] == 2:
                # Final message: update the download row and notify the user.
                self.tree.item(msg_data['novel_tree_id'], values=msg_data['value'])
                tkinter.messagebox.showinfo(title='提示', message=msg)

    # Window-close handler: signal both worker loops to stop, then exit.
    def close(self):
        print('窗口关闭了')
        self.download_quequ.put({"type": 'false'})
        self.result_quequ.put({"type": 'false'})
        log('******************结束运行************************\n\n\n\n')
        sys.exit(0)


# A second class that performs the actual downloads.
class MainRun:
    """Consumes download requests from a queue and runs a scrapy crawl for
    each one in its own process, reporting progress back through the
    result queue.
    """

    def __init__(self, download_quequ, result_quequ):
        log('******************开始运行************************')
        self.download_quequ = download_quequ  # incoming download requests
        self.result_quequ = result_quequ  # outgoing status messages

    # Start the dispatcher process that consumes the download queue.
    def run(self):
        self.p = multiprocessing.Process(target=self.exec)
        self.p.start()

    # Dispatcher loop: spawn one child process per novel until the
    # sentinel message arrives.
    def exec(self):
        while True:
            novel_data = self.download_quequ.get()
            if novel_data['type'] == 'false':
                # Sentinel pushed on shutdown: stop dispatching.
                break

            p = multiprocessing.Process(target=self.run_scrapy, args=(novel_data, self.result_quequ))
            p.start()

    @staticmethod
    def _format_size(num_bytes):
        """Return a human-readable size string for *num_bytes*.

        Files of at least 1 MiB are shown in megabytes, smaller ones in
        kilobytes.  (The previous code labelled the raw byte count as 'KB'
        and converted anything over 1000 *bytes* to megabytes, producing
        wrong numbers for small files.)
        """
        if num_bytes >= 1024 * 1024:
            return '{}M'.format(round(num_bytes / float(1024 * 1024), 2))
        return '{}KB'.format(round(num_bytes / 1024.0, 2))

    # Run one scrapy crawl for a single novel (executed in a child process).
    def run_scrapy(self, novel_data, result_quequ):
        # novel_data: {"novel_name": ..., "novel_url": ..., "novel_source": ...,
        #              "novel_tree_id": ..., "startTime": ...}
        msg = '-开始下载了....'
        msg1 = novel_data['novel_name'] + '-' + novel_data['novel_source'] + msg
        print(msg1)

        novel_data['type'] = 1  # type 1: progress message
        novel_data['msg'] = msg
        result_quequ.put(novel_data)  # notify the UI that the download started

        try:
            log(msg1 + '\n')

            process = CrawlerProcess(get_project_settings())
            process.crawl('novel', par=novel_data['novel_source'] + '|' + novel_data['novel_url'])
            process.start()  # blocks here until the crawling is finished

        except Exception as e:
            # Boundary of the child process: log the failure and fall
            # through so the UI still gets a final status message.
            print(e)
            log(e)

        # Check whether the novel file was actually produced
        # (os.path.join instead of a hand-built Windows-only path).
        novel_path = os.path.join(os.getcwd(), 'novels', '{}.txt'.format(novel_data['novel_name']))
        flag = os.path.exists(novel_path)

        endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))

        if flag:
            size = self._format_size(os.path.getsize(novel_path))
            novel_data['value'] = (novel_data['novel_name'], "已完成", novel_data['startTime'], endTime, size, "")
            msg = "-下载完成了"
        else:
            # Keep the tuple the same length as the treeview's six columns
            # (the old 4-element tuple left the last columns misaligned).
            novel_data['value'] = (novel_data['novel_name'], '下载失败', novel_data['startTime'], endTime, '--', '')
            msg = "-下载失败了"

        msg2 = novel_data['novel_name'] + '-' + novel_data['novel_source'] + msg
        print(msg2 + '\n')
        log(msg2)
        novel_data['msg'] = msg
        novel_data['type'] = 2  # type 2: final status message
        result_quequ.put(novel_data)


if __name__ == '__main__':
    # Required so multiprocessing works in frozen Windows executables.
    multiprocessing.freeze_support()

    # Queues shared between the GUI (producer) and the downloader (consumer).
    download_queue = multiprocessing.Queue()  # novels waiting to be downloaded
    result_queue = multiprocessing.Queue()  # completion/status messages

    # Start the background downloader first, then give the GUI the same queues.
    downloader = MainRun(download_queue, result_queue)
    downloader.run()

    window = MainWindows(download_queue, result_queue)
    window.mainloop()
