# -*- coding: utf-8 -*-

import requests,json,os,urllib,string,sys
import scrapy
from zuqiu.items import ZuqiuItem
from zuqiu.sqlHelper import *

# Python 2 hack: sys.setdefaultencoding is deleted from the module namespace
# at interpreter startup; reload(sys) restores it so the default str<->unicode
# codec can be forced to UTF-8 (avoids UnicodeDecodeError on Chinese text).
reload(sys)
sys.setdefaultencoding('utf8')

class ZqbigSpider(scrapy.Spider):
    name = 'zqbig'
    allowed_domains = ['gooooal.com']
    handle_httpstatus_list = [301,403,404]
    offset=1
    url='http://app.gooooal.com/dataTeamProfile.do?tid='
    start_urls = [url+str(offset)+'&lang=cn']
    def parse(self, response):
#        print '---'
        if response.status==404 or response.status==301 or response.status==403:
            print self.url+str(self.offset)+'------页面不存在'
        for each in response.xpath("//*[@id='main_02']/div[2]/table/tr"):
#            print '>>>>>each'
            item = ZuqiuItem()
            try:
                cn=each.xpath("./td[2]/div/div/table/tr[1]/td/p/span[1]/strong/text()").extract()[0]
                en=each.xpath("./td[2]/div/div/table/tr[1]/td/p/span[2]/text()").extract()[0]
                big=each.xpath("./td[2]/div/div/table/tr[2]/td[2]/p/span/text()").extract()[0]
                url = each.xpath("./td[1]/div/img/@src").extract()[0]
                if en[0:1]==' ':
                    en=en[1:len(en)]
                print big
                self.save_data(en,cn,big)
#                if self.save_data(en,cn,big):
#                    self.save_img(url,en)

                yield item
            except:
                print str(self.offset)+'is not find'

        if self.offset < 90623:
            self.offset += 1
        print self.url+str(self.offset)
        yield scrapy.Request(self.url+str(self.offset)+'&lang=cn', callback = self.parse)

    def save_data(self,en,cn,big):
        if sqlHelper.selectOne("select id from team where en=%s and cn=%s",(en,cn)):
            sqlHelper.update("update team set big=%s where en=%s",(big,en))
        else:
            sqlHelper.update("insert into team(cn,en,logo,big) values(%s,%s,%s,%s)",(cn,en,en,big))
        return True
    
    def save_img(self,img_url,file_name,file_path='img'):
        img_url=img_url.replace('\/','/')
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                #os.mkdir(file_path)
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
            filename=filename.replace('.com','_com')
            if string.find(filename,'.')==-1:
                filename=filename+'.png'
            if string.find(filename,'?')!=-1:
                filename=filename[0:len(filename)-4]
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e

