#!/usr/bin/env python
# -*- coding:utf-8 -*-

import urllib2
import re
import codecs
from bs4 import BeautifulSoup
from mylog import MyLog
from save2mysql import SaveToMySQL


class BookItem(object):
    """Value object holding the scraped fields of a single book.

    The original version declared every field as a *class* attribute, so
    all instances shared the same slots until first assignment.  They are
    now instance attributes set in ``__init__``; callers that did
    ``item = BookItem(); item.name = ...`` keep working, and the fields
    can additionally be supplied as keyword arguments.
    """

    def __init__(self, name=None, author=None, category=None,
                 word_num=None, introduction=None, url=None):
        self.name = name                  # book title
        self.author = author              # author display name
        self.category = category          # "major*minor" category string
        self.word_num = word_num          # word count text, e.g. u'12.3万字'
        self.introduction = introduction  # short synopsis
        self.url = url                    # detail-page URL


class GetBook(object):
    """Scrape the qidian.com 'all books' listing.

    Instantiating the class runs the whole pipeline as a side effect
    (kept from the original design so existing callers still work):
    discover the page count, build the listing URLs, scrape every book,
    write them to fiction.txt and hand them to SaveToMySQL.
    """

    def __init__(self):
        # The trailing `page=1` of base_url is rewritten by get_urls().
        self.base_url = 'https://www.qidian.com/all?action=1&orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=1'
        self.urls = []
        self.items = []
        self.log = MyLog()
        self.page_num = self.get_page_num(self.base_url)
        # Scrape at most 5 pages.
        self.urls = self.get_urls(self.base_url, self.page_num, 5)
        self.items = self.spider(self.urls)
        self.pipelines(self.items)
        SaveToMySQL(self.items)


    def get_page_num(self, base_url):
        """Return the total number of listing pages as an int.

        The value comes from the `data-pagemax` attribute of the
        pagination div.  It is converted to int here: the original code
        returned the raw string, and in Python 2 any str compares
        greater than any int, so `page_num >= num` in get_urls() was
        always true and the cap never applied correctly.
        Falls back to 1 page when the download fails.
        """
        html_content = self.get_response_content(base_url)
        if html_content is None:
            # Best effort: a failed fetch should not crash the run.
            self.log.error('Get Page Num Failed, Default To 1')
            return 1
        soup = BeautifulSoup(html_content, 'lxml')
        page_num = int(soup.find('div', attrs={'class': 'pagination fr'}).attrs['data-pagemax'])

        self.log.info('Get Page Num: %s' % page_num)
        return page_num


    def get_response_content(self, url):
        """Fetch *url* and return the raw response body, or None on failure.

        Errors are logged and swallowed on purpose (best-effort scraping);
        the bare `except:` of the original is narrowed to Exception so
        KeyboardInterrupt/SystemExit still propagate.
        """
        try:
            response = urllib2.urlopen(url)
        except Exception:
            self.log.error('Url: %s Open Error' % url)
            return None
        else:
            self.log.info('Url: %s Open Success' % url)
            return response.read()


    def get_urls(self, base_url, page_num, num):
        """Build the first min(page_num, num) listing-page URLs.

        base_url must end with '...page=<n>': the text after the last
        '=' is replaced with the page index 1..final_num.  page_num may
        be a str or an int; it is normalized before comparison.
        """
        final_num = min(int(page_num), num)

        url_list = base_url.split('=')

        urls = []
        # range, not xrange: identical iteration here, portable to Py3.
        for i in range(1, final_num + 1):
            url_list[-1] = str(i)
            url = '='.join(url_list)
            self.log.info("Get A Url: %s" % url)
            urls.append(url)

        return urls


    def spider(self, urls):
        """Download every listing page in *urls* and return BookItem objects.

        Pages that fail to download are logged and skipped instead of
        crashing BeautifulSoup with a None document.
        """
        items = []
        for url in urls:
            html_content = self.get_response_content(url)
            if html_content is None:
                self.log.error('Skip Url: %s' % url)
                continue
            # Same parser as get_page_num (the original omitted it here).
            soup = BeautifulSoup(html_content, 'lxml')
            books = soup.find('ul', attrs={'class': 'all-img-list cf'}).find_all('li')

            for book in books:
                mid_info = book.find('div', attrs={'class': 'book-mid-info'})
                name = mid_info.h4.a.get_text()
                url = mid_info.h4.a.attrs['href']
                author_info = mid_info.find('p', attrs={'class': 'author'}).find_all('a')

                author = author_info[0].get_text()
                # Category is "major*minor", joined with a literal '*'.
                category = '%s*%s' % (author_info[1].get_text(),
                                      author_info[2].get_text())
                introduction = mid_info.find('p', attrs={'class': 'intro'}).get_text().strip()
                word_num = mid_info.find('p', attrs={'class': 'update'}).span.get_text()

                self.log.info('Get A Book')
                self.log.info('name: %s' % name)
                self.log.info('author: %s' % author)
                self.log.info('category: %s' % category)
                self.log.info('word_num: %s' % word_num)
                self.log.info('introduction: %s' % introduction)
                self.log.info('url: %s' % url)

                item = BookItem()
                item.name = name
                item.author = author
                item.category = category
                item.word_num = word_num
                item.introduction = introduction
                item.url = url

                items.append(item)

        return items


    def pipelines(self, items):
        """Write every scraped item to fiction.txt (UTF-8, one blank line
        between records)."""
        filename = 'fiction.txt'

        with codecs.open(filename, 'w', 'utf8') as fp:
            for item in items:
                fp.write(item.name + '\n')
                fp.write(item.author + '\n')
                fp.write(item.category + '\n')
                fp.write(item.word_num + '\n')
                fp.write(item.introduction + '\n')
                fp.write(item.url + '\n\n')

        self.log.info('Write To File fiction.txt Success')


if __name__ == '__main__':
    # Running the module as a script kicks off the full pipeline as a
    # side effect of construction: fetch pages, parse books, write
    # fiction.txt and save to MySQL.
    GB = GetBook()
