﻿import os
import re
import socket
import time
import urllib2
from sgmllib import SGMLParser

server='166.111.180.202'  # coordination server address
port=1088                 # coordination server TCP port
finish=0                  # consecutive "no URLs received" rounds; give up past 10
exit=0                    # set to 1 when the server sends the shutdown signal "*"
count=0                   # total pages downloaded by this worker


def gets():
	"""Open and return a fresh TCP connection to the coordination server
	(same address and port the server program listens on)."""
	conn=socket.socket()
	conn.connect((server,port))
	return conn

rep=re.compile(r'[/:\*?"<>|]')
def legal(url):
	"""Return *url* with filesystem-unsafe characters (/ : * ? " < > |)
	replaced by single spaces and surrounding whitespace stripped, so it
	can be used as a filename."""
	cleaned, _ = rep.subn(" ", url)
	return cleaned.strip()
	
	
def geturl():
	"""Request a batch of URLs from the coordination server.

	Sends the "get" command and accumulates the reply until the server
	terminates it with "!" (batch complete) or "*" (shutdown signal,
	which sets the global ``exit`` flag).

	Returns the reply split on commas. Note an empty reply yields [''],
	never None.
	"""
	global exit
	s=gets()
	s.send("get")
	buff=""
	while True:
		data=s.recv(512)
		if not data:
			# BUG FIX: the server closed the connection without a
			# terminator; recv() would keep returning "" forever,
			# so break instead of spinning in an infinite loop.
			break
		if data.find("!")>=0:  # "!" marks the end of the batch
			data=data.replace("!","")
			buff=buff+data
			break
		if data.find("*")>=0:  # "*" tells this worker to shut down
			exit=1
			break
		buff=buff+data
	s.close()
	return buff.split(",")

def sendurl(urls):
	"""Upload the discovered URLs to the coordination server.

	Performs the "send" handshake up to nine times; once the server
	answers "OK", each URL is written followed by a comma and the
	transfer is terminated with "!". Sleeps 10 seconds between
	unsuccessful handshake attempts; gives up silently after nine.
	"""
	sock=gets()
	attempt=0
	while attempt<9:
		attempt+=1
		sock.send("send")
		reply=sock.recv(512)
		if reply!="OK":
			time.sleep(10)
			continue
		for item in urls:
			sock.send(item+",")
		sock.send("!")  # send finished
		break
	sock.close()
			
class URLLister(SGMLParser):
	"""SGML parser that collects the href attribute of every <a> tag
	into ``self.urls``, in document order."""
	def reset(self):
		# Reset parser state and start a fresh URL list.
		SGMLParser.reset(self)
		self.urls = []
	def start_a(self, attrs):
		# attrs is a list of (name, value) pairs for the <a> tag.
		for name, value in attrs:
			if name == 'href':
				self.urls.append(value)
			
def spider(urls):
	global count
	newurls=set()
	for url in urls:
		if len(url)<10:		
			print "wrong url:",url
			continue
		try:
			urlfile=urllib2.urlopen(url)
			count+=1
		except:
			print url
			continue
	#保存网页
		#print "open:",url
		content=urlfile.read()
		filename=legal(url)
		filename="pages/"+filename[0:100]
		#如果不存在就返回False
		if not os.path.isfile(filename):
			pf=open("pages/"+filename,"w")
			pf.write(content)
#################
		print "downloaded:",url
#################
	#分析网页链接
		parser = URLLister()
		try:
			parser.feed(content)
		except:
			print  "analysis urls error   ",url
			continue
		for url in parser.urls :
			if url.find("blog.sina.com.cn",0)>=0 and url.find("photo.blog.sina.com.cn",0)<0 and (url not in newurls):
					newurls.add(url)
	return newurls
	
def printlog(plog,text,urls):
	"""Append *text* — and, when *urls* is not None, each URL — to the
	open log file *plog*, one entry per line."""
	entries=[text]
	if urls is not None:
		entries.extend(urls)
	for entry in entries:
		plog.write(entry+"\n")



######################
while True:
	urls=geturl()
	if exit==1:
		print "finished because of the server had sent the signal"
		break
	##########
	print "Got from server: ",len(urls),"urls"
#	for url in urls:
#		print url
	##########
	if urls==None:
		finish=finish+1
		print "finished added",finish
		if(finish>10):
			print "finished because of cant't get urls"
			break
		time.sleep(10)
		continue
	else:
		finish=0
	newurls=spider(urls)
###########	
	print "total downloaded:",count
	print "Send to the server:"
	for url in newurls:
		print url
##########
	sendurl(newurls)
print "total num",count

#tp=["http://photo.blog.sina.com.cn/qingwu","http://vhead.blog.sina.com.cn/player/outer_player.swf?auto=1&vid=3024841&uid=1250256972"]
#spider(tp)
