#!/usr/bin/python
#获取百度url
from urllib import request
from urllib import parse
from bs4 import BeautifulSoup
import threading
import time

# Intended to record the index of the last Baidu result page once detected;
# 10000 acts as a "not found yet" sentinel. NOTE(review): getPage assigns
# `isMax` without a `global` declaration, so this module-level value is
# never actually updated — confirm intent.
isMax=10000

class UHack(object):
    """Scrape one page of Baidu search results and append the resolved
    result URLs to ``urllist.txt``.

    Args:
        searchName: raw search query; stored URL-quoted for the query string.
        page: zero-based result-page index (Baidu paginates via ``&pn=page*10``).
    """

    def __init__(self, searchName, page):
        self.searchName = parse.quote(searchName)  # quote once, reused twice in the URL
        self.page = page
        self.urllist = []  # resolved target URLs collected by getPage()

    def UHSpader(self):
        """Fetch one result page and persist its URLs; best-effort, never raises."""
        try:
            urls = self.getPage()
            self.save_txt(urls)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; runtime message kept unchanged.
            print("getpage失败")

    def getPage(self):
        """Fetch the Baidu result page for this query/page.

        Returns:
            The accumulated list of resolved target URLs (``self.urllist``).
        """
        url="https://www.baidu.com/s?wd="+self.searchName+"&pn="+str(self.page)+"0&oq="+self.searchName+"&ie=utf-8&usm=3&rsv_idx=1&rsv_pq=ff9b6b810003a8e4&rsv_t=a46cW3eallMjjiUN9IysAiCPGnuUkpBVLnFUv1uyUdbl2iHWZx0uUwdCJec"
        req = request.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0')
        try:
            openPage = request.urlopen(req, timeout=5)
        except Exception:
            # BUG FIX: the original printed and fell through, then hit a
            # NameError on the undefined `openPage`; return what we have.
            print("连接失败，百度url失效或拒绝访问！", "---->", self.page)
            return self.urllist
        bes = BeautifulSoup(openPage, "html.parser")
        # Last-page detection: Baidu marks the current page number with a
        # <strong> inside the #page pager element.
        div = bes.find(id="page")
        if div is not None and len(div.contents) > 11 and div.contents[11].name == "strong":
            # BUG FIX: without `global` the original created a local that
            # shadowed the module-level flag, so it never took effect.
            global isMax
            isMax = self.page
        # Each <h3> wraps a redirect link; follow it to get the real target URL.
        for h in bes.findAll("h3"):
            try:
                _url = request.urlopen(h.contents[0]["href"], timeout=5).geturl()
                self.urllist.append(_url)
                print(_url)
            except Exception:
                print("获取url失败，可能原因是网址失效！")
        return self.urllist

    # Save (append) the collected urls to a txt file, one per line.
    def save_txt(self, list):  # param name kept as `list` for caller compatibility
        try:
            # `with` guarantees the handle is closed even if a write fails
            # (the original leaked the handle on error); explicit utf-8 keeps
            # output stable across platforms.
            with open("urllist.txt", "a", encoding="utf-8") as txt:
                for url in list:
                    txt.write(url + "\n")
        except Exception:
            print("文件操作失误！")

# Thread entry point: run a single page's scrape-and-save job.
def start(searchName,page):
    spider = UHack(searchName, page)
    spider.UHSpader()


if __name__=="__main__":
    # Fan out one thread per result page; the loop index doubles as the
    # Baidu page number passed to each worker.
    thread_num=70
    Threads=[]
    for i in range(thread_num):
        try:
            t = threading.Thread(target=start, args=("inurl:php?id=",i))
            Threads.append(t)
        except Exception:
            # Narrowed from a bare `except:`; message kept unchanged.
            print("线程创建完毕")
            break
    # BUG FIX: iterate over the threads actually created. The original
    # indexed range(thread_num), which raises IndexError whenever the
    # creation loop above broke early and Threads is shorter.
    for t in Threads:
        t.start()
    for t in Threads:
        t.join()