#coding=gbk
import requests
import Queue
import threading
from bs4 import BeautifulSoup
import sys
import re
#https://www.baidu.com/s?wd=pycharm&pn=50

# Shared result stores, appended to concurrently by the urlSpider workers.
home_pages = []  # unique site roots ("scheme://host"), deduplicated before append
url_pages = []   # every resolved result URL that answered with HTTP 200
class urlSpider(threading.Thread):

    def __init__(self,queue):
        threading.Thread.__init__(self)
        self.queue=queue

    def run(self):
        while not self.queue.empty():
            url = self.queue.get()
            self.spider(url)

    def spider(self,url):
        k = 1
        head={"User-Agent":'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}
        soup=BeautifulSoup(requests.get(url=url,headers=head,timeout=1).content,'lxml')
        s=soup.find_all("a",class_=False,attrs={"data-click":re.compile(".*")})
        for i in s:
            try:
                url_target=requests.get(url=i["href"],headers=head,timeout=2)
            except:
                continue
            if url_target.status_code==200:
                url_pages.append(url_target.url)
                print url_target.url
                url_temp=url_target.url.split("/")
                home_page=url_temp[0]+"//"+url_temp[2]
                if home_page not in home_pages:
                    home_pages.append(home_page)
                    print home_page
def write(word):
    """Dump the crawl results to two text files named after the query.

    Writes home_pages to "<word>(homePage).txt" and url_pages to
    "<word>(urlpage).txt", one entry per line.  Uses "with" so the file
    handles are closed even if a write fails (the original leaked the
    handle on error).
    """
    with open(word + "(homePage).txt", "w") as file_home:
        for page in home_pages:
            file_home.write(page)
            file_home.write("\n")
    with open(word + "(urlpage).txt", "w") as file_url:
        for page in url_pages:
            file_url.write(page)
            file_url.write('\n')

def main():
    word = raw_input("you want to search :").encode("utf-8")
    queue = Queue.Queue()
    threads=[]
    thread_count=10
    for i in range(0,270,10):
        queue.put("https://www.baidu.com/s?wd="+word+"&pn="+str(i))
    for i in range(thread_count):
        threads.append(urlSpider(queue))
    for i in range(thread_count):
        threads[i].start()
    for i in range(thread_count):
        threads[i].join()
    write(word)
    print len(home_pages)
    print len(url_pages)

# Run the crawler only when executed as a script, not when imported.
if __name__=="__main__":
    main()