import urllib.request
import urllib.parse
from lxml import etree # 解析xpath

##### Build the request object ######
def create_request():
  """Build a GET request for the target site with browser-like headers.

  Returns:
    urllib.request.Request: request for http://www.yedushu.com carrying a
    Cookie and User-Agent header so the server treats us like a browser.
  """
  # Target site to scrape.
  url = "http://www.yedushu.com"

  # Minimal headers: many sites only check User-Agent (and sometimes Cookie)
  # to filter out obvious bots, so we send just those two.
  headers = {
      'Cookie': 'sdfsdfsdfsdfsdf',
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
  }

  # Customize the request object (GET; for POST, urlencode()+encode() a data
  # dict and pass it via the data= argument).
  request = urllib.request.Request(url = url, headers = headers)
  return request

##### Fetch the page content #####
def get_content(request):
  """Send *request* to the server and return the decoded page source.

  Args:
    request: a urllib.request.Request (e.g. from create_request()).

  Returns:
    str: response body decoded as UTF-8.

  Raises:
    urllib.error.URLError: on network / HTTP failure.
  """
  # Simulate a browser sending the request to the server.
  # Use a context manager so the HTTP response is always closed
  # (the original leaked the connection).
  with urllib.request.urlopen(request) as response:
    content = response.read().decode('utf-8')
  return content

#### Proxy-server pool #####
def prox(request):
  """Open *request* through a randomly chosen HTTP proxy.

  Args:
    request: a urllib.request.Request to send.

  Returns:
    the http.client.HTTPResponse returned by the proxied opener.
  """
  # BUG FIX: the pool was a *set* of dicts, which raises TypeError at
  # runtime (dict is unhashable) — and random.choice needs a sequence.
  # A list fixes both problems.
  proxies_pool = [
    {'http': '118.24.219.151:16817'},
    {'http': '118.24.219.151:16817'},
  ]
  import random
  # Pick one proxy at random to spread requests across the pool.
  proxies = random.choice(proxies_pool)
  # Build a handler that routes traffic through the chosen proxy.
  handler = urllib.request.ProxyHandler(proxies = proxies)
  # Build an opener from the handler.
  opener = urllib.request.build_opener(handler)
  # Issue the request through the proxy.
  response = opener.open(request)
  return response

##### Parse the page source to extract the data we want #######
## content: the fetched page source (str)
def get_datas(content):
  """Return the ``value`` attribute of the ``<input id="su">`` element."""
  # Build an element tree from the raw HTML.
  tree = etree.HTML(content)
  # XPath yields a list of matching attribute values; take the first hit.
  matches = tree.xpath('//input[@id="su"]/@value')
  return matches[0]

#### Download image files #####
def down_load(content):
  """Download every image found in ``div#container`` to ./loveImg/.

  Args:
    content: the page source (str) to parse.

  Side effects:
    Creates ./loveImg/ if missing and writes one .jpg per image,
    named after the image's ``alt`` attribute.
  """
  import os
  tree = etree.HTML(content)
  name_list = tree.xpath('//div[@id="container"]//a/img/@alt')
  # Image sites usually lazy-load: the real URL lives in a custom
  # attribute (src2 here), not in src.
  src_list = tree.xpath('//div[@id="container"]//a/img/@src2')
  # Ensure the output directory exists before the first urlretrieve.
  os.makedirs('./loveImg', exist_ok=True)
  # zip pairs names with sources and stops at the shorter list, so a
  # mismatch in counts no longer raises IndexError as the old
  # range(len(...)) indexing did.
  for name, src in zip(name_list, src_list):
    url = 'https:' + src
    # Download the image: urllib.request.urlretrieve(url, filename)
    urllib.request.urlretrieve(url = url, filename='./loveImg/' + name + '.jpg')

##### Program entry point #####
if __name__ == "__main__":
  # Build the request, fetch the page, and dump the raw source.
  req = create_request()
  page = get_content(req)
  print(page)