# -*- coding: utf-8 -*-

import json
import os
import re
import string
import sys
import traceback
import urllib

import requests
import scrapy

from gooooal.sqlHelper import *

# Python 2 hack: force the process-wide default encoding to UTF-8 so implicit
# str<->unicode conversions of the Chinese text below do not raise
# UnicodeDecodeError. site.py deletes setdefaultencoding at startup; reload(sys)
# restores it. NOTE(review): not valid in Python 3 and affects the whole process.
reload(sys)
sys.setdefaultencoding('utf8')

class NbaSpider(scrapy.Spider):
    name = 'nba'
    allowed_domains = ['gooooal.com']
    handle_httpstatus_list = [301,403,404]
    offset=1
    url='http://app.gooooal.com/dataNbaTeamProfile.do?tid='
    endurl='&lang=cn'
    start_urls = [url+str(offset)+endurl]
    def parse(self, response):
#        print '---'
        if response.status==404 or response.status==301 or response.status==403:
            print self.url+str(self.offset)+'------页面不存在'
        for each in response.xpath("//*[@id='main_02']/div[2]/table/tr"):
            try:
                name=each.xpath('./td[2]/div/div/table/tr[1]/td/p/span/strong/text()').extract()[0]
                string = name.decode("utf-8")
                cnobj = re.compile(u'[^\u4E00-\u9FA5]')#非中文
                cn = cnobj.sub(r'', string)#replace
                enobj=re.compile(u'[\u4E00-\u9FA5]')
                en=enobj.sub(r'',string)
                en=en[1:]
                big=each.xpath("./td[2]/div/div/table/tr[2]/td/p/span/text()").extract()[0]
                big=cnobj.sub(r'',big)
                big=big.replace("繁体名","")
                url = each.xpath("./td[1]/div/img/@src").extract()[0]
                print cn+'--'+en+'--'+big+'--'+url
                if en[0:1]==' ':
                    en=en[1:len(en)]
                if self.save_data(en,cn,big,en+url[len(url)-4:]):
                    self.save_img(url,en)

            except Exception,e:
                print traceback.print_exc() 
                print str(self.offset)+'解析异常！'

        if self.offset < 90623:
            self.offset += 1
        yield scrapy.Request(self.url+str(self.offset)+self.endurl, callback = self.parse)

    def save_data(self,en,cn,big,logo):
        if sqlHelper.selectOne("select id from team where en=%s and cn=%s and typeText=%s",(en,cn,'篮球')):
            print str(self.offset)+big+'--已存在，更新LOGO'
            sqlHelper.update("update team set big=%s,source=%s,logo=%s where cn=%s and typeText=%s",(big,self.url+str(self.offset)+self.endurl,logo,cn,'篮球'))
        else:
            sqlHelper.update("insert into team(type,cn,en,logo,big,source) values(%s,%s,%s,%s,%s,%s)",('篮球',cn,en,logo,big,self.url+str(self.offset)+self.endurl))
        return True
    
    def save_img(self,img_url,file_name,file_path='nba'):
        img_url=img_url.replace('\/','/')
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                #os.mkdir(file_path)
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
            filename=filename.replace('.com','_com')
            if string.find(filename,'.')==-1:
                filename=filename+'.png'
            if string.find(filename,'?')!=-1:
                filename=filename[0:len(filename)-4]
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e

