# -*- coding: utf-8 -*-

import json
import os
import re
import string
import sys
import traceback
import urllib
from time import strftime, gmtime

import requests
import scrapy

from zuqiu.items import ZuqiuItem
from zuqiu.sqlHelper import *

reload(sys)
sys.setdefaultencoding('utf8')

class GooooalSpider(scrapy.Spider):
    name = 'gooooal'
    allowed_domains = ['gooooal.com']
    handle_httpstatus_list = [301,403,404]
    offset=1
    type_text=["橄榄球","棒球","蓝球","足球"]
    type_list=[]
    type_list.append("http://app.gooooal.com/nflHome.do?method=nflRank")#橄榄球
    type_list.append("http://www.gooooal.com/live/bsb_live.html")#棒球
    type_list.append("http://app.gooooal.com/dataNbaTeamProfile.do?tid=[*]&lang=cn")#蓝球
    type_list.append("http://app.gooooal.com/dataTeamProfile.do?tid=[*]&lang=cn")#足球
    def parse(self, response):
        if response.status==404 or response.status==301 or response.status==403:
            print self.url+str(self.offset)+'------页面不存在'
        res=response.body
        if response.meta['type']==0:
            for each in response.xpath('//*[@id="NFL_Atable_1"]/table'):
                for i in range(2,6):
                    logo=each.xpath("./tr["+str(i)+"]/td[1]/img/@src").extract()[0]
                    name=each.xpath("./tr["+str(i)+"]/td[2]/text()[1]").extract()[0]
                    string = name.decode("utf-8")
                    cnobj = re.compile(u'[^\u4E00-\u9FA5]')#非中文
                    cn = cnobj.sub(r'', string)#replace
                    en=each.xpath("./tr["+str(i)+"]/td[2]/text()[2]").extract()[0]
                    enobj=re.compile(u'[\u4E00-\u9FA5]')
                    en=enobj.sub(r'',en)
                    if en[0:1]==' ':
                        en=en[1:len(en)]
                    if self.save_data(en,cn,''):
                        self.save_img(url,en)
        elif response.meta['type']==1:
            for each in response.xpath('//*[@id="div_datalist"]/li'):
                logo=each.xpath("./table/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr[2]/td[1]/img/@src").extract()[0]
                name=each.xpath("./table/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr[2]/td[1]/span/text()").extract()[0]
                if self.save_data(strftime("%Y-%m-%d-%H-%M-%S"),cn,''):
                    self.save_img(url,en)
                logo=each.xpath("./table/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr[3]/td[1]/img/@src").extract()[0]
                name=each.xpath("./table/tbody/tr[2]/td/table/tbody/tr/td[1]/table/tbody/tr[3]/td[1]/span/text()").extract()[0]
                if self.save_data(en,cn,''):
                    self.save_img(url,en)
                
        for each in response.xpath("//*[@id='main_02']/div[2]/table/tr"):
            item = ZuqiuItem()
            try:
                name=each.xpath('./td[2]/div/div/table/tr[1]/td/p/span/strong/text()').extract()[0]
                string = name.decode("utf-8")
                cnobj = re.compile(u'[^\u4E00-\u9FA5]')#非中文
                cn = cnobj.sub(r'', string)#replace
                enobj=re.compile(u'[\u4E00-\u9FA5]')
                en=enobj.sub(r'',string)
                en=en[1:]
                big=each.xpath("./td[2]/div/div/table/tr[2]/td/p/span/text()").extract()[0]
                big=cnobj.sub(r'',big)
                big=big.replace("繁体名","")
                url = each.xpath("./td[1]/div/img/@src").extract()[0]
                print cn
                if en[0:1]==' ':
                    en=en[1:len(en)]
#                self.save_data(en,cn,big)
                if self.save_data(en,cn,big):
                    self.save_img(url,en)

                yield item
            except Exception,e:
                print traceback.print_exc() 
                print str(self.offset)+'解析异常！'

        if self.offset < 90623:
            self.offset += 1
        print self.url+str(self.offset)
        yield scrapy.Request(self.url+str(self.offset)+'&lang=cn', callback = self.parse)

    def save_data(self,en,cn,big,logo):
        if sqlHelper.selectOne("select id from nba where en=%s and cn=%s",(en,cn)):
            sqlHelper.update("update nba set big=%s where en=%s",(big,en))
        else:
            sqlHelper.update("insert into nba(cn,en,logo,big) values(%s,%s,%s,%s)",(cn,en,logo,big))
        return True
    
    def save_img(self,img_url,file_name,file_path='nba'):
        img_url=img_url.replace('\/','/')
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                #os.mkdir(file_path)
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
            filename=filename.replace('.com','_com')
            if string.find(filename,'.')==-1:
                filename=filename+'.png'
            if string.find(filename,'?')!=-1:
                filename=filename[0:len(filename)-4]
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e
        
    def start_requests(self):
        pages=[]
        nlist=[1,1,200,110000]
        t=0
        for ul in self.type_list:
            for i in range(nlist[t]):
                url=ul.replace("[*]",str(i)
                page=scrapy.Request(url,self.parse,meta={'type':t},dont_filter=True)
                pages.append(page)
            t+=1
        return pages
