# -*- coding: utf-8 -*-
import json
import os
import string
import sys
import traceback
import urllib

import requests

import scrapy

from S00.sqlHelper import *

reload(sys)
sys.setdefaultencoding('utf8')

class NqSpider(scrapy.Spider):
    name = 'nq'
    allowed_domains = ['500.com']
    offset=1
    num=0
    table="nba"
    url='http://liansai.500.com/lq/411/team/'
    start_urls = [url+str(offset)]
    DOWNLOAD_DELAY=0.1
    def parse(self, response):
        for each in response.xpath('//*[@id="bd"]/div[2]/div[2]/div[2]/div/div/div[1]'):
            try:
                cn=each.xpath("./div[2]/ul/li[1]/text()").extract()[0]
                en=each.xpath("./div[2]/ul/li[5]/text()").extract()[0]
                big=each.xpath("./div[2]/ul/li[3]/text()").extract()[0]
                url = each.xpath("./div[1]/@style").extract()[0]
                cn=cn[4:]
                en=en[4:]
                big=big[4:]
                url=url[21:len(url)-2]
                url="http://www.500.com"+url
                print cn+'------'+en+'------'+big+'------'+url
                if en[0:1]==' ':
                    en=en[1:len(en)]
                self.save_data(en,cn,big,en+url[len(url)-4:])
                self.save_img(url,en)

            except Exception,e:
                print traceback.print_exc()
                print str(self.offset)+'不存在'

        if self.offset < 3598:
            self.offset += 1
        yield scrapy.Request(self.url+str(self.offset), callback = self.parse)

    def save_data(self,en,cn,big,logo):
        print logo
        if sqlHelper.selectOne("select id from team where cn=%s and typeText=%s",(cn,'篮球')):
            sqlHelper.update("update team set source=%s,logo=%s where cn=%s and typeText=%s and source=%s",(self.url+str(self.offset),logo,cn,'篮球','500.com'))
            print cn+'-已存在'
            return False
        sqlHelper.update("insert into team(type,typeText,cn,en,big,logo,source) values(%s,%s,%s,%s,%s,%s,%s)",(31,'篮球',cn,en,big,logo,self.url+str(self.offset)))
        self.num+=1
        return True
    
    def save_img(self,img_url,file_name,file_path='nq'):
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e

    def close(self,reason):
        print "采集完成,共采集了["+str(self.num)+"]条数据"
