#coding=utf-8
import json
import sys
import os
import requests
import urllib2
# import urllib.request
# Download resources listed in data.txt into the local res/ directory.
# Build the list of download URLs from data.txt in the working directory.
# Each relevant line contains an "https://..." URL, possibly preceded by
# other text, which is stripped off.
data = []
with open(os.getcwd() + "/data.txt") as date_file:
    for line in date_file:
        start = line.find("https://")
        if start == -1:
            # No URL on this line. (The original sliced with find() == -1,
            # i.e. line[-1:], appending a garbage one-char/empty entry.)
            continue
        data.append(line[start:].replace("\n", ""))
# Download every URL in `data` into ./res/, mirroring the URL path on disk.
# A file is skipped when it already exists locally or when its extension is
# not a known resource type.
for down_url in data:
    # NOTE(review): len("https://") is 8, so the +9 offset also drops the
    # first character of the hostname. Preserved as-is because files already
    # on disk were saved under these paths — confirm whether it is intended.
    save_path = os.getcwd() + "/res/" + down_url[down_url.find("https://") + 9:]

    # Extension whitelist (the original listed "plist" twice).
    suffix = save_path[save_path.rfind('.') + 1:]
    if suffix not in ("png", "plist", "json", "fnt", "mp3", "jpg",
                      "MP3", "zip", "js", "ts"):
        continue
    if os.path.exists(save_path):
        continue  # already downloaded — do not fetch again

    print(down_url)
    try:
        # `requests` is already imported at the top of the file; unlike the
        # original urllib2 call (and the Py2-only "except X, err" syntax),
        # this works on both Python 2 and Python 3.
        resp = requests.get(down_url)
        resp.raise_for_status()  # treat HTTP 4xx/5xx as a failed download
        payload = resp.content  # renamed: the original clobbered `data` here
        local_dir = save_path[:save_path.rfind('/')]
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        with open(save_path, "wb") as out_file:
            out_file.write(payload)
    except requests.RequestException:
        # Best-effort: log and continue with the next URL, as before.
        print('res http require error---' + down_url)
