# -*- coding: utf-8 -*-
# scrapy crawl  test
#.encode()
import scrapy,time,re,MySQLdb
from scrapy.http import Request
from scrapy_test.items import Book,Chapter,Article  # item classes for the pipeline

class book(scrapy.Spider):  # must inherit from scrapy.Spider
    """Spider for www.aiqu.la: crawls book pages, their chapter lists and
    chapter contents, yielding Book / Chapter / Article items for the
    pipeline and doing id lookups against a local MySQL database.

    Run with: scrapy crawl test
    """

    name = "test"  # spider name used on the command line
    start_urls = [  # alternative to defining a start_requests() method
        'https://www.aiqu.la',
    ]

    # Hoisted constants: site base URL and the chapter-link patterns,
    # compiled once instead of on every loop iteration.
    BASE_URL = "https://www.aiqu.la"
    CHAPTER_URL_RE = re.compile(r'/book/(\d+)/(\d+)/(\d+)\.html')
    CHAPTER_LINK_RE = re.compile(r'<a href="/book/(\d+)/(\d+)/(\d+)\.html">(.*)</a>')

    def parse(self, response):
        """Extract every book link from the index page and follow it.

        :param response: index-page response
        :yields: Request per book detail page, handled by book_parse
        """
        # loop variable renamed: the original used `book`, shadowing the class name
        for entry in response.css('.item'):
            book_url = entry.css('div .image a::attr(href)').extract_first()
            if book_url:  # guard: skip items with no link instead of crashing on None
                yield Request(self.BASE_URL + book_url, callback=self.book_parse)

    def book_parse(self, response):
        """Persist the book's metadata and chapter list.

        Yields one Book item, one Chapter item per chapter link, and a
        Request per chapter page (handled by chapte_parse).

        :param response: book detail page response
        """
        name = response.css('#info h1::text').extract_first().strip()
        author = response.xpath("//div[@id='info']//p[1]//text()").extract_first()
        chapter_href = response.xpath("//div[@id='info']//p[4]//@href").extract_first()

        book_type, third_book_id, last_chapterid = 0, 0, 0
        match = self.CHAPTER_URL_RE.search(chapter_href or "")  # `or ""`: href may be missing
        if match:
            book_type, third_book_id, last_chapterid = match.groups()

        cover_img = response.css("#fmimg img::attr(src)").extract_first()
        author = author.replace("作  者：", "")  # strip the "author:" label
        update_time = response.xpath("//div[@id='info']//p[3]//text()").extract()[0]
        update_time = update_time.replace("最后更新：", "")  # strip the "last updated:" label
        update_time = int(time.mktime(time.strptime(update_time, "%Y-%m-%d %H:%M:%S")))
        desc = response.xpath("//div[@id='intro']//text()").extract_first().strip()
        last_chapter = response.xpath("//div[@id='info']//p[4]//text()").extract()[1]

        book_item = Book()
        book_item["name"] = name
        book_item["book_type"] = book_type
        book_item["sequence"] = 0
        book_item["is_active"] = 1
        book_item["add_time"] = int(time.time())
        book_item["cover_img"] = cover_img
        book_item["author"] = author
        book_item["update_time"] = update_time
        book_item["desc"] = desc
        book_item["last_chapterid"] = last_chapterid
        book_item["last_chapter"] = last_chapter
        book_item["user_id"] = 1
        book_item["third_book_id"] = third_book_id
        yield book_item

        # Cache DB lookups: every chapter of one page shares the same book id,
        # so query once per third_book_id instead of once per chapter.
        book_row_cache = {}
        for anchor in response.xpath("//div[@id='list']//dl//dd//a").extract():
            if "title=" in anchor:
                continue  # preserved filter — anchors carrying title= are skipped
            match = self.CHAPTER_LINK_RE.search(anchor)
            if not match:
                continue
            book_type, third_book_id, third_chapter_id, chapter_name = match.groups()

            if third_book_id not in book_row_cache:
                # parameterized query instead of % string interpolation
                book_row_cache[third_book_id] = self.db(
                    "select id from story_book where third_book_id=%s",
                    (third_book_id,))
            book_row = book_row_cache[third_book_id]
            if not book_row:
                continue  # book row missing in DB; cannot attach the chapter (was a crash)

            chapter_item = Chapter()
            chapter_item["book_id"] = book_row[0]
            chapter_item["name"] = chapter_name
            chapter_item["level_id"] = 0
            chapter_item["paths"] = 0
            chapter_item["parent_id"] = 0
            chapter_item["order_num"] = 0
            chapter_item["add_time"] = int(time.time())  # int, consistent with Book.add_time
            chapter_item["user_id"] = 0
            chapter_item["size"] = 0
            chapter_item["sale_price"] = 0
            chapter_item["sale_num"] = 0
            chapter_item["third_chapter_id"] = third_chapter_id
            yield chapter_item

            chapter_url = "https://www.aiqu.la/book/%s/%s/%s.html" % (
                book_type, third_book_id, third_chapter_id)
            yield Request(chapter_url, callback=self.chapte_parse)

    def chapte_parse(self, response):
        """Persist a chapter's content as an Article item.

        :param response: chapter page response
        """
        if response.status == 404:
            self.write("chapte_parse_" + str(response.url))
            return  # fix: original fell through and crashed parsing the error page

        content = response.css("#content").extract_first()
        third_chapter = response.xpath(
            "//div[@class='bottem1']//a[@rel='nofollow']//@onclick").extract_first()
        if third_chapter is None:
            return  # guard: page without the bookmark onclick handler (was a TypeError)
        chapter_group = re.search(r'addBookMarkByManual\((\d+),(\d+),\'.*', third_chapter)
        if chapter_group:
            third_chapter_id = chapter_group.group(1)
            third_book_id = chapter_group.group(2)
            # parameterized lookups instead of % string interpolation
            chapter_row = self.db(
                "select id from story_chapter where third_chapter_id=%s",
                (third_chapter_id,))
            book_row = self.db(
                "select id from story_book where third_book_id=%s",
                (third_book_id,))

            if chapter_row and book_row:
                article_item = Article()
                article_item["chapter_id"] = chapter_row[0]
                article_item["content"] = content
                article_item["book_id"] = book_row[0]
                article_item["user_id"] = 0
                yield article_item

    def db(self, sql, params=None):
        """Run a query and return the first row (tuple), or None.

        :param sql: SQL text, optionally with %s placeholders
        :param params: optional sequence of values for the placeholders,
            passed to the driver for safe substitution (new, defaulted —
            backward compatible with pre-interpolated SQL)
        :return: first result row as a tuple, or None on no row / error

        Fixes the original leak: a fresh connection was opened on every
        call and neither cursor nor connection was ever closed.
        """
        conn = None
        try:
            conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd="", db="pachong",
                                   charset="utf8", port=3306, use_unicode=True)
            cursor = conn.cursor()
            try:
                cursor.execute(sql, params)
                return cursor.fetchone()
            finally:
                cursor.close()
        except Exception as e:
            print(e)
            self.write("_" + str(e))
            return None  # explicit: callers must check for a missing row
        finally:
            if conn is not None:
                conn.close()

    def write(self, is_str):
        """Append one line to the local debug log foo.txt.

        :param is_str: text to append
        """
        # `with` guarantees the handle is closed even if the write raises
        with open("foo.txt", "a", encoding="utf-8") as fo:
            fo.write(is_str)
            fo.write("\n")















