#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Import the required libraries
import urllib.request
import xlwt
import requests
from lxml import etree
import time
import re
import mysql.connector
import os

# Disabled MySQL configuration block (kept for reference; contains credentials).
# mydb = mysql.connector.connect(
#     host="192.168.47.102",  # database host address
#     user="ccmadmin",  # database user name
#     database="FAST_START_REPTILE",  # database name (INSERT fails without it)
#     passwd="211314wccA@"  # database password
#
# )
# base_dir="E:\data\小说\\"
# mycursor = mydb.cursor()
# # mycursor.execute("SHOW DATABASES")
# # mycursor.execute("CREATE TABLE sites (name VARCHAR(255), url VARCHAR(255))")
# book_type_insert_sql = "INSERT INTO reptile_wanshu_fiction_type (name, oper_id) VALUES (%s, %s)"
# sql = "INSERT INTO reptile_wanshu_fiction (book_name, author_name, source_book_url, book_type, source_download_url1, source_download_url2, status) VALUES (%s, %s, %s, %s, %s, %s, %s)"

# Accumulates one 7-tuple per scraped book:
# (book_name, author_name, source_book_url, book_type, dl_url1, dl_url2, status)
all_info_list = []

# Root directory for downloaded novels.  NOTE(review): originally this was
# only defined inside the disabled MySQL block above, so the script crashed
# with NameError at `label_dir = base_dir + ...`; restored here.
base_dir = "E:\\data\\小说\\"

# Optional database handles.  They are None while the MySQL block above is
# disabled; every DB write below is guarded on `mycursor is not None`, and
# scraped rows are always kept in all_info_list regardless.
mydb = None
mycursor = None
sql = ("INSERT INTO reptile_wanshu_fiction "
       "(book_name, author_name, source_book_url, book_type, "
       "source_download_url1, source_download_url2, status) "
       "VALUES (%s, %s, %s, %s, %s, %s, %s)")

# Script entry point: walk the novel categories, resolve each book's
# download link via the DownSys redirector and save the .rar/.txt file.
if __name__ == '__main__':
    # base_url = "https://m.iysw.net"
    base_url = "https://iysw.net"
    tmp = base_url + "/yq/"
    # NOTE(review): fetches a hard-coded debug URL instead of `tmp`; kept
    # as-is because the regexes below were evidently tuned against it.
    html = requests.get("https://ixdzs8.com/read/46710/")
    html.encoding = 'utf-8'
    print("==================================================")
    print(html.text)
    print("==================================================")

    # Category labels scraped from the page title.
    labelfind = '<title>(.*?)</title>'
    labels = re.findall(labelfind, html.text, re.S)
    if labels:
        print(labels[0])
    # Relative URLs of the per-category TXT download listings.
    labelfindurl = '<span class="btn-solid"><a href="(.*?)">TXT下载</a></span>'
    labelurls = re.findall(labelfindurl, html.text, re.S)
    if labelurls:
        print(labelurls[0])

    for i in range(len(labels)):
        # Resume checkpoint: only category index 2 is processed.
        if i != 2:
            continue
        label_dir = base_dir + labels[i]
        full_label_url = base_url + labelurls[i]
        # NOTE(review): another hard-coded debug URL (full_label_url unused).
        label_response = requests.get("http://m.kenshuzw.info/xiaoshuo/227737/")
        label_response.encoding = 'utf-8'
        # Total page count, rendered on the page as "页次：1/<N>每页15总数…".
        total_page_find = '页次：1/(.*?)每页15总数'
        total_page_nums = re.findall(total_page_find, label_response.text, re.S)
        if not total_page_nums:
            continue
        for j in range(1, int(total_page_nums[0]) + 1):
            # Resume checkpoint: pages 1..227 were already downloaded.
            if j <= 227:
                continue
            page_dir = label_dir + "\\第" + str(j) + "页"
            if not os.path.exists(page_dir):
                os.makedirs(page_dir)
            # Page 1 is the listing index itself; later pages append
            # "index_<j>.html" (the original `if j == 1` branch was a no-op).
            page_url = full_label_url
            if j > 1:
                page_url = page_url + "index_" + str(j) + ".html"
            book_response = requests.get(page_url)
            book_response.encoding = 'utf-8'
            # One <tr> per book; the captured group holds the <a ...> attrs
            # plus the link text.
            book_find = '<tr>.*?<td width="290">.*?<b>.*?<a (.*?)</a>.*?</b>.*?</td>.*?</tr>'
            book_msgs = re.findall(book_find, book_response.text, re.S)
            for book_msg in book_msgs:
                book_names = re.findall('《(.*?)》', book_msg, re.S)
                book_urls = re.findall('href="(.*?)"', book_msg, re.S)
                if len(book_names) != 1 or len(book_urls) != 1:
                    continue
                print("开始解析" + labels[i] + "的第" + str(j) + "页的"
                      + book_names[0] + "====共有"
                      + str(int(total_page_nums[0])) + "页")

                full_book_url = base_url + book_urls[0]

                # Book detail page: author + the two download links.
                # NOTE(review): these requests were commented out upstream,
                # leaving book_detail_response / book_download_urls undefined
                # (guaranteed NameError); restored here.
                book_detail_response = requests.get(full_book_url)
                book_detail_response.encoding = 'utf-8'
                book_download_find = '<a href="#ecms" onclick="window.open\(\'(.*?)\'\,\'\'\,\'width=900,height=900,resizable=yes\'\);">下载地址'
                book_download_urls = re.findall(book_download_find, book_detail_response.text, re.S)

                author_find = '文章作者：</B>(.*?)<BR><B>文章类别：'
                author = re.findall(author_find, book_detail_response.text, re.S)

                # Skip books without exactly two download links (the original
                # fallback code after `continue` was unreachable and dropped).
                if len(book_download_urls) != 2:
                    continue

                val = [book_names[0],
                       author[0] if len(author) == 1 else "未知",
                       full_book_url,
                       i + 2,  # category id offset expected by the DB schema
                       base_url + book_download_urls[0],
                       base_url + book_download_urls[1]]
                full_download_url = base_url + book_download_urls[0]

                # Resolve the real file URL behind the DownSys redirector.
                download_base_url = 'https://iysw.net/e/DownSys'
                download_page = requests.get(full_download_url)
                download_page.encoding = 'utf-8'
                down_ff = '<a href="..(.*?)" title'
                down_ff_urls = re.findall(down_ff, download_page.text, re.S)
                full_finally_url = ""
                if down_ff_urls:
                    full_finally_url = download_base_url + down_ff_urls[0]

                if full_finally_url != '':
                    # The final URL answers with a redirect whose Location
                    # header names the actual file (may be absent → "").
                    file_res = requests.get(full_finally_url, allow_redirects=False)
                    local_name = file_res.headers.get("location") or ""
                    # Keep only CJK characters and ASCII alphanumerics to
                    # build a filesystem-safe file name.
                    file_name = re.sub(
                        u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])",
                        "", book_names[0])
                    ext = "rar" if "rar" in local_name else "txt"
                    try:
                        urllib.request.urlretrieve(
                            full_finally_url,
                            '{0}{1}.{2}'.format(page_dir + "\\", file_name, ext))
                        print("小说: " + book_names[0] + '保存结束')
                    except Exception:
                        print("此连接异常")
                    val.append(1)  # status 1: download attempted
                else:
                    val.append(0)  # status 0: no resolvable download link

                record = tuple(val)
                all_info_list.append(record)
                # Persist only when the MySQL block at the top is enabled.
                if mycursor is not None:
                    mycursor.execute(sql, record)
                    mydb.commit()
