import scrapy
import sys
sys.path.append("..")
from scrapyProject.spiders.tianya.items import TitleItem, ContentItem, NextPageItem, LinkItem
from scrapyProject.spiders.spiderBase import spiderBase
from scrapyProject.spiders.tianya.tianyaDataBase import tianyaDataBase
import hashlib

'''
def getID(url, code='utf-8'):
    md5 = hashlib.md5()
    if code != "":
        md5.update(url.encode(code))  # note: encode the URL before hashing
    else:
        md5.update(url)  # note: caller must pass bytes when no codec is given

    res = md5.hexdigest()
    return res
'''

class TianyaSpider(spiderBase):
    """Spider for the Tianya BBS (bbs.tianya.cn).

    Crawls board listing pages, yields one ``TitleItem`` per thread row,
    then follows each thread to emit a ``LinkItem`` (raw page) and one
    ``ContentItem`` per host post.  Discovered URLs are also registered
    with the base class (``AddNeedrequestUrl``) so an interrupted crawl
    can resume from ``getUnrequestUrl``.
    """

    name = 'tianyaSpider'

    def start_requests(self):
        """Seed the crawl.

        Source of start URLs, in order of preference:
          1. URLs the base class remembers as not yet requested.
          2. Links the database flags as possibly needing an update.
          3. A hard-coded board index page when nothing else is pending.

        Yields:
            Requests produced by ``self.RequestUrl`` with ``parse`` as callback.
        """
        urls = list(self.getUnrequestUrl())

        dataBase = tianyaDataBase()
        # Each row from GetMayNeedUpdateLink() carries the URL in column 1
        # (column 0 is presumably the row id — verify against tianyaDataBase).
        urls.extend(row[1] for row in dataBase.GetMayNeedUpdateLink())

        if not urls:
            # Fresh crawl: fall back to the board index page.
            urls = ['http://bbs.tianya.cn/list-16-1.shtml']
        else:
            # De-duplicate while preserving first-seen order (O(n), replaces
            # the old list(set(...)) + sort(key=urls.index) which was O(n^2)).
            urls = list(dict.fromkeys(urls))

        for url in urls:
            yield self.RequestUrl(url=url, callback=self.parse)

    def parse(self, response):
        """Parse a board listing page.

        Emits a ``TitleItem`` per thread row, queues every thread page for
        ``parseItem`` (passing the item via ``meta``), and records the next
        listing page with the crawl queue / ``NextPageItem``.
        """
        selData = response.xpath('.//div[@class="mt5"]')
        selCat = selData.xpath('.//tbody')

        selTitle = selCat.xpath('.//td[contains(@class, "td-title")]')
        lsTitle = selTitle.xpath('string(.//a[@href])').extract()
        lsLink = selTitle.xpath('.//a[@href]/@href').extract()
        lsAutor = selCat.xpath('.//a[@class="author"]/text()').extract()

        lsNextPage = response.xpath('.//div[@class="short-pages-2 clearfix"]//a[@rel="nofollow"]/@href').extract()

        # Register every thread link (and the next listing page) with the
        # base class so a restart can resume the crawl.
        for link in lsLink:
            self.AddNeedrequestUrl(response.urljoin(link))
        if lsNextPage:
            self.AddNeedrequestUrl(response.urljoin(lsNextPage[0]))

        # NOTE(review): zip() truncates to the shortest of the three lists;
        # the previous index loop raised IndexError whenever lsAutor was
        # shorter than lsLink.  Confirm mismatched rows should be skipped.
        for title, link, autor in zip(lsTitle, lsLink, lsAutor):
            itemData = TitleItem()
            itemData['title'] = title.strip()
            itemData['dataLink'] = response.urljoin(link)
            itemData['autor'] = autor
            itemData['url'] = response.url
            # Yield the item first: a pipeline presumably assigns
            # itemData['id'], which parseItem reads from meta — verify.
            yield itemData
            yield self.RequestUrl(url=itemData['dataLink'], callback=self.parseItem, meta={'item': itemData})

        if lsNextPage:
            itemNextPage = NextPageItem()
            itemNextPage['dataLink'] = response.urljoin(lsNextPage[0])
            yield itemNextPage

    def parseItem(self, response):
        """Parse one thread page.

        Emits a ``LinkItem`` holding the raw page body, then one
        ``ContentItem`` per post written by the thread host, and finally
        follows the in-thread "next page" link back into ``parseItem``.
        """
        itemTitle = response.meta['item']
        contentData = response.xpath('.//div[@class="atl-main"]')

        # Locate posts authored by the thread host: start from the "host"
        # badge in each post header and climb back up to the post container.
        dataCat = contentData.xpath('.//div[@class="atl-head"]//strong[@class="host"]')
        dataItem = dataCat.xpath('../../../..')
        txtData = dataItem.xpath('.//div[@class="atl-content"]//div[@class="bbs-content"]')
        txtData = txtData.xpath('string()').extract()

        itemLink = LinkItem()
        # NOTE(review): itemTitle['id'] / itemLink['id'] are never assigned in
        # this spider — they appear to be filled in by an item pipeline after
        # the item is yielded; verify against the pipeline code.
        itemLink['idTitle'] = itemTitle['id']
        itemLink['content'] = response.body
        itemLink['url'] = response.url
        yield itemLink

        for inx, content in enumerate(txtData):
            itemContent = ContentItem()
            itemContent['idTitle'] = itemTitle['id']
            itemContent['idUrl'] = itemLink['id']
            itemContent["content"] = content.strip()
            itemContent["inxInUrl"] = inx
            yield itemContent

        lsNextPage = response.xpath('.//a[@class="js-keyboard-next"]/@href').extract()
        if lsNextPage:
            nextPage = response.urljoin(lsNextPage[0])
            self.AddNeedrequestUrl(nextPage)
            yield self.RequestUrl(url=nextPage, callback=self.parseItem, meta={'item': itemTitle})

