# -*- coding: utf-8 -*-
import scrapy
from qqnews.sqlHelper import *

class QqnewSpider(scrapy.Spider):
    """Crawls the tech.qq.com IT news listing pages (it_1.htm .. it_15.htm),
    follows each article link, and persists articles through sqlHelper."""
    name = 'qqnew'
    allowed_domains = ['tech.qq.com']
    # Listing-page URL prefix; a page is url + offset + ".htm"
    url = "http://tech.qq.com/c/it_"
    # Current listing page number, advanced in parse() up to page 15.
    offset = 1
    start_urls = ['http://tech.qq.com/c/it_' + str(offset) + ".htm"]

    def parse(self, response):
        """Extract article links from one listing page, then queue the next page.

        Entries without an <h3><a href> are reported and skipped instead of
        being silenced by a bare except (which also hid unrelated bugs).
        """
        for i, each in enumerate(response.xpath('//*[@id="listZone"]/div'), start=1):
            links = each.xpath("./h3/a/@href").extract()
            if links:
                yield scrapy.Request(links[0], callback=self.parse_content)
            else:
                # Fixed suffix in the diagnostic: pages are ".htm", not ".html".
                print(self.url + str(self.offset) + ".htm" + "[" + str(i) + "]页面分析异常")

        # Stop after page 15. The original re-requested the same page forever
        # and relied on Scrapy's dupefilter to break the loop.
        if self.offset < 15:
            self.offset += 1
            yield scrapy.Request(self.url + str(self.offset) + ".htm", callback=self.parse)

    def parse_content(self, response):
        """Parse a single article page and save title/images/category/body/date."""
        title = response.xpath('//*[@id="Main-Article-QQ"]/div/div[1]/div[1]/div[1]/h1/text()').extract()[0]
        # Comma-separated list of every <img src> on the page. join() never
        # leaves a trailing comma, and images without a src attribute are
        # skipped instead of raising IndexError and aborting the parse.
        img = ",".join(
            src
            for each in response.xpath('//img')
            for src in each.xpath("./@src").extract()[:1]
        )
        category = response.xpath('//*[@id="Main-Article-QQ"]/div/div[1]/div[1]/div[1]/div/div[1]/span[1]/a/text()').extract()[0]
        content = response.xpath('//*[@id="Main-Article-QQ"]/div/div[1]/div[1]/div[2]').extract()[0]
        date = response.xpath('//*[@id="Main-Article-QQ"]/div/div[1]/div[1]/div[1]/div/div[1]/span[3]/text()').extract()[0]
        style = 1
        print(title)
        self.save_data(title, img, category, content, date, style)

    def save_data(self, title, img, category, content, date, style):
        """Insert the article, or refresh its image list if the title already exists."""
        # Parameter binding must be a tuple: (title,) — a bare (title) is just
        # the string itself and only worked by accident with some DB drivers.
        if sqlHelper.selectOne("select title from news where title=%s", (title,)):
            sqlHelper.update("update news set img=%s where title=%s", (img, title))
            return
        sqlHelper.update(
            "insert into news(title,img,category,content,style,date) values(%s,%s,%s,%s,%s,%s)",
            (title, img, category, content, style, date))
