# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
-------------------------------------------------
   File Name：     start_spide.py
   Description :  抓取免费代理
   Author :       cherry
   date：          2017/5/23
      代理获取的相关代码，
      目前抓取了66代理、快代理 有代理、西刺代理、无忧代理 、代理IP检测平台、年少代理、云代理、dlip网站  这九个网站的免费代理，
      更新时间在半小时内的有无忧代理、代理IP检测平台、西刺代理、年少代理 、66代理IP 五个网站 
      因此定时任务执行的就这五个，
      
-------------------------------------------------
   Change Activity:
                   2017/5/23
               
-------------------------------------------------
"""
import re
import requests
import  random
from bs4 import BeautifulSoup
import threading 
from db import DataBase as db
from constant import agent


current_proxy = ""



# Scrape www.66ip.cn: one API call returns up to 100 free proxies.
def get_66ip():
    """Fetch up to 100 proxies from www.66ip.cn and store each in the DB."""
    api_url = 'http://www.66ip.cn/mo.php?sxb=&tqsl=100&port=&export=&ktip=&sxa=&submit=%CC%E1++%C8%A1&textarea='
    response = requests.get(api_url)
    page = BeautifulSoup(response.text, 'lxml')
    first_paragraph = str(page.find('p'))
    # Proxies are rendered as "ip:port<br" runs inside the first <p>;
    # re.S lets '.' span newlines inside the HTML.
    for endpoint in re.findall(r'([0-9].*?)<br', first_paragraph, re.S):
        db.add_new_proxy(endpoint)


# Scrape the xicidaili free-proxy API; the list refreshes every 15 minutes,
# ~100 "ip:port" entries per response.
def get_xicidaili():
    """Fetch ~100 proxies from the xicidaili text API and store each in the DB."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    api_url = 'http://api.xicidaili.com/free2016.txt'
    response = requests.get(url=api_url, headers=headers, timeout=5)
    # The plain-text body ends up inside a single <p> after lxml parsing.
    body = BeautifulSoup(response.content, "lxml").find('p').text
    print("爬取西刺网站的代理IP")
    stored = []
    for candidate in body.split("\r\n"):
        stored.append(candidate)
        db.add_new_proxy(candidate)

    
           
# Scrape kuaidaili.com proxy-list pages (~100 usable entries; updates slowly).
def get_kuaidaili():
    """Walk pages 1-9 of kuaidaili.com/proxylist and store every ip:port pair."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    for page in range(1, 10):
        # Pages beyond ~9 are mostly historical IPs with low availability.
        page_url = 'http://www.kuaidaili.com/proxylist/%s/' % page
        response = requests.get(url=page_url, headers=headers, timeout=4)
        page_soup = BeautifulSoup(response.text, "lxml")
        ip_cells = page_soup.find_all('td', attrs={'data-title': 'IP'})
        port_cells = page_soup.find_all('td', attrs={'data-title': 'PORT'})
        harvested = []
        print("获取快代理的代理IP")
        # IP and PORT cells are parallel columns of the same table.
        for ip_cell, port_cell in zip(ip_cells, port_cells):
            endpoint = str(ip_cell.text) + ':' + str(port_cell.text)
            harvested.append(endpoint)
            db.add_new_proxy(endpoint)




'''
抓取有代理 http://www.youdaili.net/Daili/http/
有代理的IP按天更新  爬取最新一天的代理IP   
总共500条
'''
def get_youdaili():
    ua=random.choice(agent.user_agent_list)   #从agent.user_agent_list中随机抽取出一个字符串
    header={"User-Agent":ua}
    for i in range(1,6):
        url="http://www.youdaili.net/Daili/http/"
#         if i==1:
#             url = "http://www.youdaili.net/Daili/http/36782.html"
#         else :
#             url = "http://www.youdaili.net/Daili/http/36782_%s.html"%i
        html=requests.get(url=url, headers=header, timeout=2)
        soup=BeautifulSoup(html.content,"lxml")
        all_p=soup.find_all("p")
        latest_url=all_p[0].find('a',attrs={'href':True}).attrs['href']   #获取到最新日期的url
        for i in range(1,6):
            if i==1:
                url=latest_url
            else :
                url.replace(".html", "_"+str(i)+".html")
            html2=requests.get(url=latest_url, headers=header)
            soup2=BeautifulSoup(html2.content,"lxml")
            contents=soup2.find('div',attrs={'class':"content"})
            a=contents.find_all('p')
            print ("获取有代理网站的IP")
            proxys_list=[]
            for i in range(0,len(a)):
                proxy = re.findall(r'(.*)@.*', a[i].text)  #获取最新日期的代理IP
                url_ip=proxy[0]
                proxys_list.append(url_ip)
                db.add_new_proxy(url_ip)
        #         db.add_new_proxys(proxys_list)

            
# Scrape data5u.com ("无忧代理"): ~20 proxies, refreshed within 10 minutes.
def get_data5u():
    """Fetch the proxies listed on www.data5u.com and store each in the DB."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    response = requests.get(url="http://www.data5u.com/", headers=headers, timeout=2)
    listing = BeautifulSoup(response.content, "lxml")
    rows = listing.find_all("ul", attrs={'class': "l2"})
    harvested = []
    print("抓取无忧代理网站的IP")
    for row in rows:
        # The first <li> holds the IP; the <li class="port"> cell holds the port.
        endpoint = row.find("li").text + ":" + row.find("li", attrs={'class': "port"}).text
        harvested.append(endpoint)
        db.add_new_proxy(endpoint)

    
    
# Scrape ip181.com (a proxy-checking platform): ~90 proxies,
# each verified within the last 10 minutes.
def get_ip181():
    """Fetch the verified proxies from www.ip181.com and store each in the DB."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    response = requests.get(url="http://www.ip181.com/", headers=headers, timeout=2)
    table_body = BeautifulSoup(response.content, "lxml").find("tbody")
    collected = []
    print("获取代理IP检测平台的IP")
    for row in table_body.find_all("tr"):
        cells = row.find_all("td")
        endpoint = cells[0].text + ":" + cells[1].text  # ip:port
        collected.append(endpoint)
        db.add_new_proxy(endpoint)

    
# First-run scrape of nianshao.me: 30 pages each of the HTTP (stype=1) and
# HTTPS (stype=2) listings of proxies updated today.
def get_nianshao_all():
    """Fetch 30 pages of HTTP and HTTPS proxies from www.nianshao.me."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    print("年少网站代理IP")
    for scheme in range(1, 3):  # 1 = HTTP listings, 2 = HTTPS listings
        for page in range(1, 31):
            page_url = "http://www.nianshao.me/?stype={}&page={}".format(scheme, page)
            response = requests.get(url=page_url, headers=headers, timeout=2)
            rows = BeautifulSoup(response.content, "lxml").find("tbody").find_all("tr")
            batch = []
            for row in rows:
                cells = row.find_all("td")
                endpoint = cells[0].text + ":" + cells[1].text  # ip:port
                batch.append(endpoint)
                db.add_new_proxy(endpoint)
# Scheduled scrape of nianshao.me: each listing refreshes one page every
# 15 minutes (~20 entries/page), so only the first page is fetched per run.
def get_nianshao():
    """Fetch page 1 of the HTTP (stype=1) and HTTPS (stype=2) listings from
    www.nianshao.me and store each ip:port in the DB.

    Fix: the original URL ended with a dangling ``page=`` (no page number);
    it now explicitly requests page 1, matching the one-page-per-run intent
    and the URL shape used by ``get_nianshao_all``.
    """
    ua = random.choice(agent.user_agent_list)  # randomized UA
    header = {"User-Agent": ua}
    print("年少网站代理IP")
    for i in range(1, 3):  # 1 = HTTP listings, 2 = HTTPS listings
        url = "http://www.nianshao.me/?stype=%s&page=1" % i
        html = requests.get(url=url, headers=header, timeout=2)
        soup = BeautifulSoup(html.content, "lxml")
        trs = soup.find("tbody").find_all("tr")
        proxys_list = []
        for tr in trs:
            tds = tr.find_all("td")
            proxy = tds[0].text + ":" + tds[1].text  # ip:port
            proxys_list.append(proxy)
            db.add_new_proxy(proxy)

    
# Scrape the five category pages of www.dlip.cn.
def get_dlip():
    """Fetch proxies from each listing page of www.dlip.cn and store them."""
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    category_urls = [
        "http://www.dlip.cn/gnp/",
        "http://www.dlip.cn/gng/",
        "http://www.dlip.cn/gwg/",
        "http://www.dlip.cn/gwp/",
        "http://www.dlip.cn/SOCKS/",
    ]
    print("网站www.dlip.cn代理IP")
    for category_url in category_urls:
        response = requests.get(url=category_url, headers=headers, timeout=2)
        table = BeautifulSoup(response.content, "lxml").find("table", attrs={'id': "ip_list"})
        rows = table.find_all("tr")
        batch = []
        # Skips row 0 and the final row (presumably header/footer rows —
        # pattern kept from the original implementation).
        for row in rows[1:len(rows) - 1]:
            cells = row.find_all("td")
            endpoint = cells[1].text + ":" + cells[2].text  # ip:port
            batch.append(endpoint)
            db.add_new_proxy(endpoint)

        
        
        
# Scrape www.ip3366.net ("云代理"): listing pages contain ~20 entries with
# duplicates across pages; updated roughly once a day.
def get_yundaili():
    """Fetch proxies from www.ip3366.net free listings and store ip:port pairs.

    Fix: the original called ``db.add_new_proxy(ip)`` with the bare IP,
    dropping the port; it now stores the full ``ip:port`` string that it
    builds, consistent with every other scraper in this module.
    """
    ua = random.choice(agent.user_agent_list)  # randomized UA
    header = {"User-Agent": ua}
    all_proxy = []
    print("获取云代理IP网站的代理IP")
    for i in range(0, 10):
        url = "http://www.ip3366.net/free/?stype=1&page=%s" % i
        html = requests.get(url=url, headers=header, timeout=5)
        soup = BeautifulSoup(html.content, "lxml")
        div = soup.find("div", attrs={'id': "list"})
        tbody = div.find("tbody")
        trs = tbody.find_all("tr")
        proxy_list = []
        for tr in trs:
            tds = tr.find_all("td")
            proxy = tds[0].text + ":" + tds[1].text  # ip:port
            proxy_list.append(proxy)
            db.add_new_proxy(proxy)  # store the full endpoint, not just the IP
        all_proxy.extend(proxy_list)
    # The site repeats entries across pages; dedupe (result currently unused).
    all_proxy = list(set(all_proxy))

def get_baizhongsou():
    """Scrape http://ip.baizhongsou.com/ (refreshed every 30 minutes).

    Only odd-indexed table rows are harvested; even rows are skipped
    (presumably separator/ad rows — pattern kept from the original).
    """
    headers = {"User-Agent": random.choice(agent.user_agent_list)}  # randomized UA
    response = requests.get(url="http://ip.baizhongsou.com/", headers=headers, timeout=2)
    listing = BeautifulSoup(response.content, "lxml").find("div", attrs={"class": "daililist"})
    rows = listing.find_all("tr")
    for index in range(1, len(rows)):
        if index % 2 == 0:
            continue
        first_cell = rows[index].find_all("td")[0]
        db.add_new_proxy(first_cell.text)
   
  
    
    
# Entry point on program start: scrape every known source concurrently.
def run():
    """Launch one fire-and-forget thread per scraper (threads are not joined)."""
    scrapers = (
        get_youdaili,
        get_xicidaili,
        get_kuaidaili,
        get_data5u,
        get_ip181,
        get_nianshao_all,
        get_dlip,
        get_yundaili,
        get_66ip,
        get_baizhongsou,
    )
    for scraper in scrapers:
        threading.Thread(target=scraper).start()

# Scheduled-task entry point: only the fast-refreshing sources are scraped.
def run_sch():
    """Launch one fire-and-forget thread per fast-updating scraper."""
    scrapers = (
        get_data5u,
        get_xicidaili,
        get_ip181,
        get_nianshao,
        get_66ip,
        get_baizhongsou,
    )
    for scraper in scrapers:
        threading.Thread(target=scraper).start()
            
            

'''

http://www.xker.com/ip/

http://www.httpdaili.com/


'''
