#!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ = 'chenbinghui'

import re
import urllib
import os
import urllib2
import random
import time

#处理页面标签类
class ToolUtil:
    def __init__(self):
        self.user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        # 为了防止后台对访问进行统计，如果单个userAgent访问超过阈值，予以封锁。
        # （效果出奇的棒！不过误伤也超级大，一般站点不会使用，不过我们也考虑进去
        # 随机的User-Agent
        UA = random.choice(self.user_agent_list)
        self.header = {'User-Agent': UA}
        #IP代理列表
        self.ipList=[]
    #去除img标签,7位长空格
    removeImg = re.compile('<img.*?>| {7}|')
    #删除超链接标签
    removeAddr = re.compile('<a.*?>|</a>')
    #把换行的标签换为\n
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    #将表格制表<td>替换为\t
    replaceTD= re.compile('<td>')
    #把段落开头换为\n加空两格
    replacePara = re.compile('<p.*?>')
    #将换行符或双换行符替换为\n
    replaceBR = re.compile('<br><br>|<br>')
    #将其余标签剔除
    removeExtraTag = re.compile('<.*?>')
    def replace(self,x):
        x = re.sub(self.removeImg,"",x)
        x = re.sub(self.removeAddr,"",x)
        x = re.sub(self.replaceLine,"\n",x)
        x = re.sub(self.replaceTD,"\t",x)
        x = re.sub(self.replacePara,"\n    ",x)
        x = re.sub(self.replaceBR,"\n",x)
        x = re.sub(self.removeExtraTag,"",x)
        #strip()将前后多余内容删除
        return x.strip()
    # 获取索引页面的内容
    def getPage(self, url):
        request = urllib2.Request(url, headers=self.header)
        response = urllib2.urlopen(request)
        content = response.read().decode('utf-8')
        return content
    #通过代理去获取页面内容
    def getPageWithProxy(self,url,proxy=None,num_retries=6):
        print '开始获取url:',url
        if proxy==None:
            try:
                return self.getPage(url)
            except:
                if num_retries > 0:
                    time.sleep(10)#延迟10秒
                    print '获取页面出错，10s后将获取倒数：',num_retries
                    return self.getPageWithProxy(url,num_retries-1)
                else:
                    print '开始使用代理'
                    time.sleep(10)
                    IP = str(random.choice(self.ipList))
                    proxy = {'http':IP}
                    return self.getPageWithProxy(url,proxy)
        else:##代理不为空
            try:
               IP = str(random.choice(self.ipList))
               proxy = {'http': IP}
               return self.getPageWithProxy(url, proxy)
            except:
                if num_retries >0:
                    time.sleep(10)
                    IP = str(random.choice(self.ipList))
                    proxy = {'http': IP}
                    return self.getPageWithProxy(url, proxy,num_retries-1)
                else:
                    print '代理也好使了，取消算了'
                    return self.getPage(url)

    #存入图片数据
    def saveImg(self,imageURL,fileName):
        u = urllib.urlopen(imageURL)
        data = u.read()
        f = open(fileName,'wb')
        f.write(data)
        f.close()
    #存入文本文件
    def saveText(self,content,name):
        fileName = name+"/"+name+'.txt'
        f = open(fileName,'a')
        print u'正在进行保存content数据信息',fileName
        f.write(content.encode('utf-8')+'\n')
    #创建新目录
    def mkdir(self,path):
        path = path.strip()
        #判断路径是否存在
        #存在 true
        #不存在 false
        isExists = os.path.exists(path)
        if not isExists:
            os.mkdir(path)
            return True
        else:
            return False
    #获取到最后一次出现的字符
    def find_last(self,string, str):
        last_position = -1
        while True:
            position = string.find(str, last_position + 1)
            if position == -1:
                return last_position
            last_position = position
    #获取到伪装的ip代理服务
    def getIpProxy(self):
        url = 'http://www.kuaidaili.com/ops/proxylist/1/'
        content = self.getPage(url)
        pattern = re.compile('<tr.*?<td data-title="IP">(.*?)</td>.*?PORT">(.*?)</td>', re.S)
        items = re.findall(pattern, content)
        for item in items:
            self.ipList.append(item[0]+':'+item[1])
        return self.ipList


# Module-level instance kept for backward compatibility with any importer
# that references `tool`.
tool = ToolUtil()

if __name__ == '__main__':
    # Fetching the proxy list hits the network; do it only when run as a
    # script, not as an import side effect (the original ran it on import).
    tool.getIpProxy()
