# -*- coding: utf-8 -*-

import json
import os
import re
import string
import sys
import traceback
import urllib

import requests
import scrapy

from zuqiu.items import ZuqiuItem
from zuqiu.sqlHelper import *

# Python 2 hack: re-expose sys.setdefaultencoding (deleted at interpreter
# startup) so implicit str<->unicode coercion uses UTF-8 instead of ASCII.
# Required below because scraped Chinese text is mixed with byte strings.
reload(sys)
sys.setdefaultencoding('utf8')

class fb90vsSpider(scrapy.Spider):
    name = '90vs'
    allowed_domains = ['90vs.com']
    handle_httpstatus_list = [301,403,404]
    offset=100000
    DEFAULT_REQUEST_HEADERS={
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Cookie":"Hm_lvt_1e27441f6737d6bab1f28ec8c4091309=1522830354; Hm_lpvt_1e27441f6737d6bab1f28ec8c4091309=1522830354",
        "Host":"90vs.com",
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
    
    }
    url='http://bf.90vs.com/team_info/'
    start_urls = [url+str(offset)+'.html']
    def parse(self, response):
        print '---'
        if response.status==404 or response.status==301 or response.status==403:
            print self.url+str(self.offset)+'------页面不存在'
        for each in response.xpath("//*[@id='team_info_0']/table/tr[1]"):
            try:
                name=each.xpath("./td[2]/text()").extract()[0]
                string=name.decode("utf-8")
                cnobj=re.compile(u'[^\u4E00-\u9FA5]')
                cn=cnobj.sub(r'',string)
                enobj=re.compile(u'[\u4300-\u9FA5]')
                en=enobj.sub(r'',string)
                en=en.replace("(","").replace(")","")
                url = each.xpath("./td[1]/table/tr/td/img/@src").extract()[0]
                print cn+"--"+en
                if en[0:1]==' ':
                    en=en[1:len(en)]
                if self.save_data(en,cn,''):
                    self.save_img(url,en)
            except Exception,e:
                print traceback.print_exc() 
                print str(self.offset)+'is not find'

        if self.offset < 109999:
            self.offset += 1
        print self.url+str(self.offset)
        yield scrapy.Request(self.url+str(self.offset)+'.html', callback = self.parse)

    def save_data(self,en,cn,big):
        if sqlHelper.selectOne("select id from team where en=%s and cn=%s",(en,cn)):
            print '重复数据'
        else:
            sqlHelper.update("insert into team(cn,en,logo,big) values(%s,%s,%s,%s)",(cn,en,en,big))
        return True
    
    def save_img(self,img_url,file_name,file_path='img'):
        img_url=img_url.replace('\/','/')
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                #os.mkdir(file_path)
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e

