# -*- coding: utf-8 -*-
import scrapy
import urllib2
import lxml.html
from urlparse import urljoin
from jparser import PageModel
from kxchina.items import KxchinaItem 

BASE_URL = "http://www.kepuchina.cn/kpcs/lcb/lcb2/"

class KxchinaspiderSpider(scrapy.Spider):
    """Spider for article listings under kepuchina.cn/kpcs/lcb/lcb2/.

    Crawls the paginated listing pages, collects article hrefs, then
    downloads each article page and yields a KxchinaItem carrying the
    title, concatenated text content, article URL and image URLs.
    """

    name = 'kxchinaspider'

    allowed_domains = ['kepuchina.cn']
    start_urls = [BASE_URL]

    # Pagination state. IS_NEXT is cleared by parse_xpath once a listing
    # page comes back without article links; NEXT_INDEX counts how many
    # listing pages have been requested (hard cap of 10 below).
    IS_NEXT = True
    NEXT_INDEX = 0
    # NOTE: class-level mutable attribute, shared across instances; kept
    # for interface compatibility with the original code.
    article_list = []

    def parse(self, response):
        """Parse the first listing page, schedule the paginated listing
        pages, then yield an item for every collected article link.
        """
        # BUG FIX: the original bound the extracted hrefs to a local
        # variable and dropped them; store them on the shared list so the
        # first page's articles are not lost.
        self.article_list.extend(response.xpath('//b//a/@href').extract())

        while self.NEXT_INDEX < 10 and self.IS_NEXT is True:
            self.NEXT_INDEX += 1
            self.log('do parse next page')
            href = 'index_?.shtml'
            next_page = urljoin(BASE_URL, href.replace('?', str(self.NEXT_INDEX)))
            self.log(next_page)
            # BUG FIX: a Request must be yielded back to the Scrapy engine;
            # the original constructed it and discarded it, so no pagination
            # request was ever dispatched.
            yield scrapy.Request(next_page, callback=self.parse_xpath, method='GET')

        # BUG FIX: iteator_for_list() is a generator; the original called it
        # without consuming it, so no item was ever produced. Re-yield
        # everything it emits.
        for item in self.iteator_for_list():
            yield item

    def iteator_for_list(self):
        """Download every collected article page, extract text and image
        links with jparser's PageModel, and yield one KxchinaItem each.
        """
        for article in self.article_list:
            image = []
            link = BASE_URL + article.replace('./', '')
            html = urllib2.urlopen(link).read().decode('utf-8')
            pm = PageModel(html)
            result = pm.extract()
            content = ''
            for x in result['content']:
                if x['type'] == 'text':
                    content += x['data']
                if x['type'] == 'image':
                    # Image srcs are relative to the article page; rebuild an
                    # absolute URL by swapping out the final path segment.
                    src = x['data']['src'].replace('./', '')
                    link_arr = link.split('/')
                    img_link = link.replace(link_arr[-1], src)
                    image.append(img_link)
            item = KxchinaItem(title=result['title'], content=content,
                               url=link, image_urls=image)
            yield item

    def parse_xpath(self, response, result=True):
        """Callback for paginated listing pages.

        Collects the page's article hrefs into ``article_list`` and clears
        ``IS_NEXT`` when a page yields none (end of pagination).

        BUG FIX: Scrapy invokes callbacks with only ``response``; the
        original required a second ``result`` argument, so every callback
        call raised TypeError. ``result`` is kept with a default purely for
        backward compatibility with existing callers.
        """
        self.log(result)
        # BUG FIX: the original tested `if not response`, which skipped
        # extraction on every real (truthy) response.
        hrefs = response.xpath('//b//a/@href').extract() if response else []
        if hrefs:
            # BUG FIX: append() nested the whole list as one element (and the
            # local name shadowed the builtin `list`); extend() keeps
            # article_list a flat list of href strings.
            self.article_list.extend(hrefs)
        else:
            # No links on this page: stop scheduling further listing pages.
            self.IS_NEXT = False
