# -*- coding: utf-8 -*-

import requests,json,os,urllib,string,sys,re,random
import scrapy
from gooooal.sqlHelper import *
from time import gmtime,strftime

# Python 2 hack: restore the (hidden) setdefaultencoding and force UTF-8 so
# the Chinese literals below can be implicitly encoded/decoded without
# UnicodeDecodeError.  NOTE(review): this API is removed in Python 3 and
# discouraged even in Python 2 — the whole file is Python-2-only.
reload(sys)
sys.setdefaultencoding('utf8')

class BqSpider(scrapy.Spider):
    name = 'bq'
    num=0
    allowed_domains = ['gooooal.com']
    handle_httpstatus_list = [301,403,404]
    DEFAULT_REQUEST_HEADERS={
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language":"zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Cookie": "UM_distinctid=1625237ab4c292-005000bbad7ee7-2a03457b-1fa400-1625237ab50f1d; _gl_lang=cn; CNZZDATA1256409754=1772650058-1521797211-%7C1523174227; __utma=59084091.1576204156.1521797803.1523170530.1523178579.3; __utmz=59084091.1523178579.3.2.utmcsr=gooooal.com|utmccn=(referral)|utmcmd=referral|utmcct=/live/bsb_live.html; JSESSIONID=EAE674DA4C783A90648062AE085E477C; __utmc=59084091; __utmb=59084091.8.10.1523178579; lastPointNewsIdInfo=null",
        "Host": "app.gooooal.com",
        "Referer": "http://app.gooooal.com/bsbMatch.do?date=20180409&lang=cn",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/65.0.3325.181 Chrome/65.0.3325.181 Safari/537.36"
    }
    offset=59
    start_urls = ['http://app.gooooal.com/bsbMatch.do?date=20180409&lang=cn']
    def parse(self, response):
        for each in response.xpath('//*[@id="div_datalist"]/li'):
            url=each.xpath("./table/tr[2]/td/table/tr/td[1]/table/tr[2]/td[1]/img/@src").extract()[0]
            cn=each.xpath("./table/tr[2]/td/table/tr/td[1]/table/tr[2]/td[1]/span/text()").extract()[0]
            t=strftime("%Y-%m-%d-%H-%M-%S-%s", gmtime())
            rd=str(random.randint(0,99))
            print cn
            self.save_data(t+rd,cn,'',t+rd+url[len(url)-4:])
            self.save_img(url,t)
            url=each.xpath("./table/tr[2]/td/table/tr/td[1]/table/tr[3]/td[1]/img/@src").extract()[0]
            cn=each.xpath("./table/tr[2]/td/table/tr/td[1]/table/tr[3]/td[1]/span/text()").extract()[0]
            rd=str(random.randint(0,9999))
            print cn
            self.save_data(t+rd,cn,'',t+rd+url[len(url)-4:])
            self.save_img(url,t)
            self.num+=2
  
    def save_data(self,en,cn,big,logo):
        if sqlHelper.selectOne("select id from team  where cn=%s and typeText=%s",(cn,'棒球')):
            sqlHelper.update("update team set logo=%s,source=%s where cn=%s and typeText=%s",(logo,self.start_urls[0],cn,'棒球'))
            return False
        sqlHelper.update("insert into team(typeText,cn,en,logo,big,source) values(%s,%s,%s,%s,%s,%s)",('棒球',cn,en,logo,big,self.start_urls[0]))
        return True
    
    def save_img(self,img_url,file_name,file_path='bangqiu'):
        img_url=img_url.replace('\/','/')
        try:
            if not os.path.exists(file_path):
                print '文件夹',file_path,'不存在，重新建立'
                #os.mkdir(file_path)
                os.makedirs(file_path)
            #获得图片后缀
            file_suffix = os.path.splitext(img_url)[1]
            #拼接图片名（包含路径）
            filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)
            filename=filename.replace('.com','_com')
            if string.find(filename,'.')==-1:
                filename=filename+'.png'
            if string.find(filename,'?')!=-1:
                filename=filename[0:len(filename)-4]
#       m下载图片，并保存到文件夹中
            urllib.urlretrieve(img_url,filename=filename)
        except IOError as e:
            print '文件操作失败',e
        except Exception as e:
            print '错误 ：',e
    def close(self,reason):
        print "采集完成,共采集了["+str(self.num)+"]条数据"
