# -*- coding: utf-8 -*-
__author__ = 'liyang'

import re
import os
import urllib
import urllib2
import cookielib
from bs4 import BeautifulSoup

# Simple GET request helper (working).
def getHtml(url):
  """Fetch *url* with urllib and return the raw response body."""
  return urllib.urlopen(url).read()

# Request helper with a POST body (working).
def getHtml1(url, data):
  """Send *data* to *url* via urllib2 and return the response body."""
  req = urllib2.Request(url)
  return urllib2.urlopen(req, data).read()

# 下载图片(可用)
def downloadImg(html, max):
  reg = r'src="(.+?\.jpg)"'
  imgre = re.compile(reg)
  imglist = re.findall(imgre,html)
  x = 1
  for imgurl in imglist:
    if x < max:
      print 'addPage: ',imgurl
      urllib.urlretrieve(imgurl, '%s.jpg' % x)
    x+=1

# Download a link.
def download(url):
  """Fetch *url* with urllib.urlretrieve.

  No target filename is given, so urlretrieve saves the data to a
  temporary file and the returned path is discarded here --
  NOTE(review): callers get no handle to the downloaded data; confirm
  that the side effect alone is what's intended.
  """
  urllib.urlretrieve(url)

# Create the directory if it does not already exist.
def opendir(filePath):
  """Ensure directory *filePath* exists, creating it when missing."""
  # EAFP: attempt the mkdir directly instead of check-then-create, so two
  # concurrent callers cannot race between isdir() and mkdir().
  try:
    os.mkdir(filePath)
  except OSError:
    # Swallow only "already a directory"; re-raise real failures
    # (missing parent, permission denied, path is a plain file).
    if not os.path.isdir(filePath):
      raise
# os.path.isfile可以判断文件
# os.path.isdir可以判断目录
# os.path.exists不区分文件还是目录
# Open a file and return its contents.
def openFile(files):
  """Return the contents of *files*, or None when missing or empty."""
  if os.path.exists(files):
    # Fix: the original bound the handle to 'o' but read from an
    # undefined name 'f' (NameError), and never closed the file.
    # 'r' instead of 'r+': we only read, so write permission isn't needed.
    with open(files, 'r') as f:
      r = f.read()
    if r:
      return r

# Write content to a file.
def writeFile(content, filePath):
  """Write *content* to *filePath*, replacing any existing contents."""
  # 'with' closes the handle even if write() raises.
  with open(filePath, 'w') as out:
    out.write(content)

# Basic simulated login (untested).
def login(username, password, url):
  """GET *url* with the credentials as query parameters; return the body.

  NOTE(review): credentials in a GET query string end up in server and
  proxy logs; a POST would be safer. Kept as GET to preserve behavior.
  """
  values = {'username': username, 'password': password}
  data = urllib.urlencode(values)
  geturl = url + "?" + data
  # Fix: the original called 'rullib2.Request' (typo -> NameError).
  request = urllib2.Request(geturl)
  response = urllib2.urlopen(request)
  return response.read()

# Convert a dict into a list of [key, value] pairs.
def dict_to_list(d):
  """Recursively convert dict *d* into a list of [key, value] pairs.

  Nested dict values are converted recursively; pair order follows the
  dict's iteration order.
  """
  a = []
  # .items() works on both Python 2 and 3; iteritems() is 2.x-only.
  for key, value in d.items():
    # isinstance also accepts dict subclasses, unlike 'type(...) is dict'.
    if isinstance(value, dict):
      value = dict_to_list(value)
    a.append([key, value])
  return a

# Send a login POST with custom request headers (untested).
def setHeaders(username, password):
  """POST credentials with custom User-Agent/Referer headers; return body.

  Fix: removed the unused 'user_agent' local and the commented-out
  headers line, and return the response body the original computed
  and silently dropped (backward compatible: callers ignoring the
  return value are unaffected).
  """
  url = 'http://www.server.com/login'
  values = {'username': username, 'password': password}
  headers = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Referer': 'http://www.zhihu.com/articles',
  }
  data = urllib.urlencode(values)
  request = urllib2.Request(url, data, headers)
  response = urllib2.urlopen(request)
  page = response.read()
  return page

# Install a proxy-routing opener (untested).
def setIp():
  """Globally install a urllib2 opener that routes HTTP via a proxy.

  Flip 'enable_proxy' to False to install a direct (no-proxy) opener.
  """
  enable_proxy = True
  # Fix: the original bound 'proxy_hanler' (typo) but then used
  # 'proxy_handler', raising NameError whenever the proxy was enabled.
  proxy_handler = urllib2.ProxyHandler({"http": 'http://some-proxy.com:8080'})
  null_proxy_handler = urllib2.ProxyHandler({})
  if enable_proxy:
    opener = urllib2.build_opener(proxy_handler)
  else:
    opener = urllib2.build_opener(null_proxy_handler)
  urllib2.install_opener(opener)

# Use debuglevel logging to print request/response headers (working).
def printReqHeaders():
  """Install a debug-logging opener, then fetch baidu.com so the
  request and response headers are echoed to stdout."""
  opener = urllib2.build_opener(
      urllib2.HTTPHandler(debuglevel=1),
      urllib2.HTTPSHandler(debuglevel=1),
  )
  urllib2.install_opener(opener)
  response = urllib2.urlopen('http://www.baidu.com')

#发送请求并打印请求错误原因(可用)
def printReqErr(url):
  req = urllib2.Request(url)
  try:
      urllib2.urlopen(req)
  except urllib2.HTTPError, e:
      print e.code
      print e.reason
  except urllib2.URLError, e:
      print e.code
      print e.reason
  else:
      print "OK"

#打印cookie值(可用)
def printCookie():
  cookie = cookielib.CookieJar()
  handler = urllib2.HTTPCookieProcessor(cookie)
  opener = urllib2.build_opener(handler)
  response = opener.open('http://www.baidu.com')
  for item in cookie:
    print 'Name =' + item.name
    print 'Value =' + item.value

# Save received cookies to disk.
## A MozillaCookieJar collects cookies and can persist them to a file.
## HTTPCookieProcessor plugs the jar into an opener built via build_opener.
## ignore_discard=True also saves cookies that would normally be discarded.
## ignore_expires=True writes even when the file already holds entries.
def saveCookieValue(url="http://www.baidu.com"):
  """Open *url* and persist every received cookie to cookie.txt."""
  jar = cookielib.MozillaCookieJar('cookie.txt')
  opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
  response = opener.open(url)
  jar.save(ignore_discard=True, ignore_expires=True)

# Load saved cookies and send a request with them.
def getCookieValue():
  """Load cookie.txt into a jar and fetch baidu.com using those cookies."""
  jar = cookielib.MozillaCookieJar()
  jar.load('cookie.txt', ignore_discard=True, ignore_expires=True)
  opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
  response = opener.open(urllib2.Request("http://www.baidu.com"))

# 正则表达式(可用) match search
def RegMatch():
  pattern = re.compile(r'hello')
  testVal = ['hello CQC!','hello','helloo CQC!','aa hello CQC!']
  for everyVal in testVal:
    match_test = re.match(pattern, everyVal)
    search_test = re.search(pattern, everyVal)
    if match_test:
      print 'Match from the first word: '+match_test.group()
    if search_test:
      print 'Search from the all of words:'+search_test.group()

# Regex split demo.
## Splitting on digits returns the non-digit pieces as a list.
def RegSplit():
  """Split 'one1two2three3four4' on digits; print and return the parts.

  Fix: the original called re.findall -- an exact duplicate of
  RegFindall -- which contradicted both the function name and the
  "split" comment. Now demonstrates re.split as intended, and returns
  the result (backward compatible: callers ignoring it are unaffected).
  """
  pattern = re.compile(r'\d')
  parts = pattern.split('one1two2three3four4')
  print(parts)
  return parts
# findall demo: collect every digit-run in the string into a list and print it.
def RegFindall():
  """Print all digit sequences found in 'one1two2three3four4'."""
  digits = re.compile(r'\d+')
  print(digits.findall('one1two2three3four4'))
# finditer demo: iterate over the digit matches in order, printing each one.
def RegFinditer():
  """Print each digit sequence of 'one1two2three3four4' in turn."""
  digits = re.compile(r'\d+')
  for match in digits.finditer('one1two2three3four4'):
    print(match.group())

# beautiful soup (jQuery-like HTML parsing for Python)
# print soup.prettify() #格式化打印输出
# if type(soup.a.string)==bs4.element.Comment:
#   print soup.a.string
# print soup.name #打印html的名称
# print soup.attrs #打印html的属性
# print type(soup.name) #打印名称类型
# 1打印各种节点
## print soup.title
## print soup.head
## print soup.a
## print soup.p
# 2打印类型
## print type(soup.a)
# 3打印name
## print soup.name
## print soup.head.name
# 4打印attrs   .get()
## print soup.p.attrs
## print soup.p['class']
## print soup.p.get('class')
## 修改属性 soup.p['class']="newClass" print soup.p
## 删除属性 del soup.p['class'] print soup.p
# 5打印标签内部文字   .string
## print soup.p.string
## print type(soup.p.string)
# 6获取1个节点返回一个列表   .contents
## print soup.head.contents
## print soup.head.contents[0]
# 7打印一个节点下所有子节点   .children
## for child in soup.body.children:
##   print child
# 8打印一个节点的所有子孙节点
## for child in soup.descendants:
##   print child
# 9打印一个节点的父节点
## print soup.p.parent.name
## content = soup.head.title.string
## print content.parent.name
# 10打印所有父节点
# content = soup.head.title.string
# for parent in content.parents:
#   print parent.name
# 11打印下一个兄弟节点 .next_sibling 所有兄弟节点.next_siblings
## print soup.p.next_sibling
# 12 打印上一个兄弟节点 .previous_sibling 所有兄弟节点.previous_siblings
## print soup.p.previous_sibling
# 13 前后节点（所有）
## print soup.head.previous_element 前节点
## print soup.head.next_element     后节点
# 14 搜索文档树find_all(name, attrs,recursive, text,**kwargs)
# http://python.jobbole.com/81349/
# 15遍历打印所有内容和节点
# for string in soup.strings:
#   print string
# for content in soup.contents:
#   print content
# soup = BeautifulSoup(open('index.html'))
# print soup.find_all(id='link2')
# print soup.find_all(href=re.compile("elsie"))
# print soup.find_all('a')
# class要加下划线
# print soup.find_all("a", class_="sister")
# 获取自定义参数
# print soup.find_all(attrs={"data-foo": "value"})
# print soup.find_all(text=["next"])
# 限制返回数量
# print soup.find_all("a", limit=2)
# 只寻找子节点不寻找孙节点
# print soup.html.find_all("title", recursive=False)
# css选择器 从类名寻找 .select('title') .select(#link1)