import requests
import json
from bs4 import BeautifulSoup
import urllib.request
import os.path
import xlrd
# response = requests.get("http://www.baidu.com/")
#print (response.text)
# It can also be written this way:
# response = requests.request("get", "http://www.baidu.com/")
# Collect unique file ids from the workbook.
# NOTE(review): hard-coded Windows path — assumes D:\hangpai.xls exists
# on the machine running this script.
data = xlrd.open_workbook('D://hangpai.xls')
table = data.sheets()[0]          # first sheet, selected by index
nrows = table.nrows

fileids = []        # unique file ids, in first-seen order
exceptids = []      # ids whose download fails (filled by the loop below)
seen = set()        # O(1) membership test instead of scanning the list (was O(n^2))

for row_idx in range(nrows):
    row = table.row_values(row_idx)
    # Each cell may itself hold a comma-separated list of ids.
    # NOTE(review): assumes every cell is a string — a numeric cell would
    # raise AttributeError on .split, exactly as the original code did.
    for cell in row:
        for file_id in cell.split(','):
            if file_id not in seen:
                print(file_id)
                seen.add(file_id)
                fileids.append(file_id)

print(len(fileids))
# Download each collected file id as a JPEG into D:\img\.
# Failed ids are recorded in ``exceptids`` for later inspection.
for file_id in fileids:
    url = 'https://bingo.longhu.net/file/downloadasync?fileId=' + file_id
    dest = "D:\\img\\" + file_id + '.jpg'
    try:
        urllib.request.urlretrieve(url, dest)
    except IOError:
        # urlretrieve raises OSError subclasses; IOError is its alias in Py3.
        exceptids.append(file_id)
        print ("Error: 没有找到文件或读取文件失败" + file_id)
# python: extract the file name from a file path
#urllib.request.urlretrieve('https://bingo.longhu.net/file/downloadasync?fileId=12fcb5a3-70c5-4dad-b0fe-44f8ad6250f2' , "D:\\img\\12fcb5a3-70c5-4dad-b0fe-44f8ad6250f2")
    #print (i.get('src'))