import json
import  requests
from bs4 import  BeautifulSoup
import pprint
import urllib
import re
import os
from urllib.request import urlretrieve
from  lxml import etree
import json
# Quick json demo: parse a JSON document and pull nested values back out.
jsonString = '{"user_man":[{"name":"peter"},{"name":"xiaoming"}],"user_woman":[{"name":"Anni"},{"name":"Anni"}]}'
jsondata = json.loads(jsonString)
print(jsondata["user_man"])
print(jsondata["user_man"][0]["name"])

# Base search URL plus a browser-like User-Agent used by the (commented-out)
# pexels image-grabbing experiment below.
url_path = 'https://www.pexels.com/search/'

header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
}
# word=input('请输入你要搜索的图片')
# path='D://grabpics/'
# url=url_path+word+'/'
# res=requests.get(url,header)
# soup=BeautifulSoup(res.text,'lxml')
# #print(res.text)
# imgs=soup.select('article > a > img')
# list=[]
# print(len(imgs))
# for img in imgs:
#     photo=img.get('src')
#     list.append(photo)
# for item in list:
#     data=requests.get(item,header)
#     print(item)
#     fpr =open(path+item.split('?')[0][-10:],'wb')
#     fpr.write(data.content)
#     fpr.close()



# word=input('请输入中文：')  # 官方接口挂了....
# url='http://howtospeak.org:433/api/j2c?user_key=6f7bb4571aba2f8cecb82a1e53d96d02&notrans=0&text={}'.format(word)
# res=requests.get(url)
# json_data=json.loads(res.text)
# english_word=json_data['englishi']
# print(english_word)



# --------------------------- this one works (amap geocoding) --------------------------------
# address=input("请输入地点:")
# par={'address':address,'key':'cb649a25c1f81c1451adbeca73623251'}
# res=requests.get('http://restapi.amap.com/v3/geocode/geo',par)
# json_data=json.loads(res.text);
# print(json_data)
# geo=json_data['geocodes'][0]['location']
# longtitude=geo.split(',')[0]
# latitude=geo.split(',')[1]
# print(longtitude,latitude)


#----------------图片爬取 ---------------------------------
# header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}
# download_links=[]
path = 'D://grabpics/'  # local directory where downloaded pictures are written
# url='http://www.mzitu.com'
# res=requests.get(url,header)
# soup=BeautifulSoup(res.text,'lxml')
# imgs=soup.select(' li > a > img')
# for img in imgs:
#     print(img.get('data-original'))
#     download_links.append(img.get('data-original'))
# for item in download_links:
    # print(item)
  #  urlretrieve(item,path+item[-10:])


# ----------------方法二----------------




## this image site uses anti-crawler measures
# def get_info(url):
#     html=requests.get(url,header)
  #  selector=etree.HTML(html.text)
  #  print(html.text)
    # photo_urls=selector.xpath('//p/a[@class="view_img_link"]/@href')
    # print(photo_urls)
    # for photo_url in photo_urls:
    #     data=requests.get('http:'+photo_url,header)
    #     fp=open(path+photo_url[-10:],'wb')
    #     fp.write(data.content)
    #     fp.close()
#
# for url in urlses:
#    print(url)
#    get_info(url)

#232c588eeeaa5246d0fd7d544f33d66a   userkey

# current maximum page number
#!/usr/bin/python
#encoding:utf-8

# current maximum page number
# Fetch the jandan picture index page and scrape the page number shown in the
# second "current-comment-page" marker on the page.
url = 'http://jandan.net/pic'
response = urllib.request.urlopen(url)
data = response.read().decode('utf-8')
max_page_num = re.findall(r'<span class.*current-comment-page.*?>\[(.*?)\]</span>', data)[1]
def download_pic(start_page, stop_page, download_file):
    """Download every picture linked on jandan.net/pic pages [start_page, stop_page).

    Args:
        start_page: first page number to scrape (inclusive).
        stop_page: page number to stop before (exclusive, `range` semantics).
        download_file: directory path the image files are saved into.
    """
    os.chdir(download_file)  # hoisted out of the loop: the target dir never changes
    for num in range(start_page, stop_page):
        url = 'http://jandan.net/pic/page-%s' % num
        # BUG FIX: the original fetched this page but discarded the result and
        # re-scanned the module-level `data` (page 1) on every iteration.
        # Bind the decoded HTML locally so each page's links are actually used.
        page_html = urllib.request.urlopen(url).read().decode('utf-8')
        pics = re.findall(r'<a href="//(.*?)" target.*?</a><br />', page_html, re.I | re.S | re.M)
        for link in pics:
            link = 'http://' + link
            resp = requests.get(link)
            # Crude filename: a slice of the URL past the scheme/host prefix.
            pic_name = link[28:100]
            with open(pic_name, 'wb') as f:
                f.write(resp.content)  # `with` closes the file; no explicit close() needed


# Grab pages 10-19 into the configured download directory.
# (The original wrapped the literals in redundant int() casts.)
download_pic(10, 20, path)
