#-*- coding:utf8 -*-
import scrapy
from ikanman.items import IkanmanItem
import os
import sys
import faker
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import urllib

# Python 2 hack: reload(sys) re-exposes setdefaultencoding (which the
# interpreter deletes at start-up) so that implicit str<->unicode
# conversions use UTF-8 instead of ASCII and avoid UnicodeDecodeError
# on the Chinese titles scraped below.
reload(sys)
sys.setdefaultencoding('utf8')

# Shared Faker instance, used once to generate a random User-Agent
# for the spider's request headers.
f = faker.Factory.create()

class ikanmanSpider(scrapy.Spider):
    name = 'ikanman'
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Connection': 'keep-alive',
        'Host': 'www.ikanman.com',
        'Referer': 'http://www.ikanman.com/',
        'User-Agent': f.user_agent()
        }

    def __init__(self,comic=None,author=None,path=None,*args,**kwargs):
        super(ikanmanSpider, self).__init__(*args,**kwargs)
        self.start_urls=['http://www.ikanman.com/s/%s.html' % comic]
        self.author = author
        self.driver = webdriver.PhantomJS()
        _path = os.environ['HOME']
        self.path = _path + '/' + path if path is not None else _path + '/workspace/comic'
	
    def start_request(self):
        _headers=self.headers
        _headers['Referer'] = 'http://www.ikanman.com/'
        for url in self.start_urls:
            yield scrapy.Request(url=url,callback=self.parse,headers=_headers)

    def parse(self, response):
        _headers=self.headers
        _headers['Referer'] = 'http://www.ikanman.com/'
        yield scrapy.Request(response.url,callback=self.parse1,headers=self.headers)
        next_pages = response.xpath('//div[@class="pager-cont"]/div/a[@class="prev"]/@href').extract()
        for page in set(next_pages):
            yield scrapy.Request(url=response.urljoin(page),callback=self.parse1,headers=self.headers)

    def parse1(self, response): #获取漫画主页面response
        _meta = {}
        _headers=self.headers
        list_url = response.xpath('//dl/dt/a/@href').extract()
        for url in list_url:
            _headers['Referer'] = response.urljoin(url)
            _meta['headers'] = _headers
            yield scrapy.Request(url=response.urljoin(url),callback=self.parse2,headers=_headers,meta=_meta)

    def parse2(self, response): #爬取漫画主页详细信息
        _headers = response.meta['headers']
        title = response.xpath('//div[@class="book-title"]/h1/text()').extract_first()
        other_title = response.xpath('//div[@class="book-title/h2/text"]').extract_first()
        publish_year = response.xpath('//ul[@class="detail-list cf"]/li[1]/span[1]/a/text()').extract_first()
        area = response.xpath('//ul[@class="detail-list cf"]/li[1]/span[2]/a/text()').extract_first()
        type_flag = response.xpath('//ul[@class="detail-list cf"]/li[2]/span[1]/a/text()').extract()
        author = response.xpath('//ul[@class="detail-list cf"]/li[2]/span[2]/a/text()').extract()
        status = response.xpath('//li[@class="status"]/span/span[1]/text()').extract_first()
        last_update_time = response.xpath('//li[@class="status"]/span/span[2]/text()').extract_first()
        last_update_chap = response.xpath('//li[@class="status"]/span/a/text()').extract_first()
        list_hrefs = response.xpath('//div[@class="chapter cf mt16"]/div/ul/li/a/@href').extract()
        list_chaps = response.xpath('//div[@class="chapter cf mt16"]/div/ul/li/a/@title').extract()
        list_photo_count = response.xpath('//div[@class="chapter cf mt16"]/div/ul/li/a/span/i/text()').extract()
        for index in xrange(len(list_hrefs)):
            path = self.path + '/' + title + '/' + list_chaps[index]
            _meta = {'title':title,'chap':list_chaps[index],'photo_count':list_photo_count[index]}
            _meta['headers'] = _headers
            _meta['path'] = path
            self.mkdir(path)
            yield scrapy.Request(url=response.urljoin(list_hrefs[index]),callback=self.parse3,headers=_headers, meta=_meta)
	
    def parse3(self, response): #进入漫画每个章节的首页，获取漫画新链接中的起始开始页和截止页码,解析每个页面的img src并以urlretrieve保存
        _headers = response.meta['headers']
        self.driver.get(response.url)
        list_page_count = self.driver.find_elements_by_xpath('//div[@id="pagination" and @class="pager"]/a')
        page_count = list_page_count[-3].get_attribute('text')
        for index in xrange(1,int(page_count)+1):
            try:
                manga_page = '%s#p=%d' % (response.url,index)
                driver = webdriver.PhantomJS()
                driver.get(manga_page) #超时是个问题，超时就执行不了find_element_by_id,准备输出异常信息到文件,再重新爬
                mangaFile = driver.find_element_by_id('mangaFile')
                src = mangaFile.get_attribute('src').replace('i.hamreus.com:8080','p.yogajx.com')
                filename = response.meta['path'] + '/' + src.split('/')[-1].split('.')[0] + '.jpeg'
                self.retrieve(src, filename)
                print filename
            except Exception as ex:
                message = 'Driver get timeout: %s, %s, %s, %s, %s, %s' %(response.meta['title'],response.meta['chap'],response.meta['photo_count'],str(index),manga_page,src)
                print message
            finally:
                driver.close()
                driver.quit()
                del driver

    def mkdir(self, path): #针对漫画的每个章节创建指定文件夹
        if not os.path.exists(path):
            os.makedirs(path)

    def retrieve(self, url, filename): 
        #保存每个img的src，以后缀jpeg结尾,centos6上png显示不了,如果driver超时则获取不到img的src,是个问题
        try:
            urllib.urlretrieve(url, filename)
        except Exception as ex:
            print 'urlretrieve time out:', url
