#!/usr/bin/python
#-*- coding: utf-8 -*-

# spider version 1.0

import urllib
import MySQLdb
import socket
from sgmllib import SGMLParser
from Queue import Queue

class spider:
    """Placeholder class (spider v1.0); all crawling logic currently
    lives in the module-level functions below."""
    pass

def link2ip(url):       # return str(ip)
    """Resolve *url* to a dotted-quad IP string, or '' on failure.

    Accepts either a bare hostname ("example.com") or a full URL
    ("http://example.com/path").  The original passed the whole URL
    straight to gethostbyname(), which never resolves, so every
    lookup silently returned ''.
    """
    # Strip the scheme and any path so gethostbyname() gets a hostname only.
    host = url
    if "://" in host:
        host = host.split("://", 1)[1]
    host = host.split("/", 1)[0]
    try:
        return socket.gethostbyname(host)
    except socket.error:        # narrow catch instead of bare except
        return ''

def getDB():
    """Open a connection to the local 'xm' MySQL database."""
    return MySQLdb.connect(
        host='localhost',
        user='root',
        passwd='123456',
        db='xm',
    )

class XWebDB:
    """Persistence layer for crawled pages (MySQL table ``tbweb``).

    Each crawl run gets a monotonically increasing version number
    (``self.ver``).  A row's ``ver`` column records the last run that
    saw the page and ``cnt`` counts repeat encounters within that run.
    """

    def __init__(self):
        self.db = getDB()
        self.c = self.db.cursor()
        self.tbname = 'tbweb'
        # This run's version: one past the newest version already stored.
        self.ver = self.getVersion() + 1

    def getVersion(self):
        """Return the highest crawl version stored, or 1 for an empty table."""
        self.c.execute("SELECT MAX(ver) from " + self.tbname)
        self.db.commit()
        dt = self.c.fetchall()[0][0]
        if dt is None:          # empty table: MAX() yields NULL
            return 1
        return dt

    def exist(self, link):
        """True if *link* already has a row (from any crawl version)."""
        # 'sql' instead of 'str': don't shadow the builtin.
        sql = "SELECT * FROM " + self.tbname + " WHERE link = %s"
        # DB-API parameters must be a sequence: (link) is just link,
        # (link,) is the intended 1-tuple.
        n = self.c.execute(sql, (link,))
        self.db.commit()
        return n == 1

    def hasVisited(self, link):
        """True if *link* was already recorded during THIS crawl run."""
        sql = "SELECT 1 FROM " + self.tbname + " WHERE link = %s AND ver = %s"
        n = self.c.execute(sql, (link, self.ver))
        self.db.commit()
        return n == 1

    def addCount(self, link):
        """Bump the repeat-encounter counter for *link*."""
        sql = "UPDATE " + self.tbname + " set cnt=cnt+1 WHERE link=%s"
        self.c.execute(sql, (link,))
        self.db.commit()

    def addRecord(self, link, title, typename):
        """Insert a brand-new page row at the current crawl version."""
        ip = link2ip(link)
        sql = "INSERT INTO " + self.tbname + " (link, ip, title, ver, type, cnt) VALUES(%s, %s, %s, %s, %s, 0)"
        self.c.execute(sql, (link, ip, title, self.ver, typename))
        self.db.commit()

    def updateRecord(self, link, title, typename):
        """Refresh an existing row to the current crawl version, resetting cnt."""
        ip = link2ip(link)
        sql = "UPDATE " + self.tbname + " SET ip=%s, title=%s, ver=%s, type=%s, cnt=0 WHERE link=%s"
        self.c.execute(sql, (ip, title, self.ver, typename, link))
        self.db.commit()
        
xdb = XWebDB()


def checkURL(url):	# check that the url is worth following
    """Return True if *url* looks like a followable link.

    The original only rejected the literal "#", so empty hrefs,
    in-page anchors ("#top") and non-fetchable pseudo-schemes
    (javascript:, mailto:) all ended up queued as crawl targets.
    """
    if not url:
        return False
    url = url.strip()
    if not url or url.startswith("#"):
        return False                    # empty or same-page anchor
    lowered = url.lower()
    if lowered.startswith("javascript:") or lowered.startswith("mailto:"):
        return False                    # not fetchable by urlopen
    return True

def repairURL(host, url):
    """Normalize *url*, resolving it against *host* when it is relative.

    Lowercases and strips the trailing slash so equivalent links compare
    equal in the DB.  NOTE(review): lowercasing the path can merge
    distinct case-sensitive URLs -- kept for dedup compatibility with
    existing rows.

    Fixes two joining bugs: relative links without a leading "/" were
    glued with no separator ("http://a.compage.html"), and https://
    links were wrongly prefixed with *host* again.
    """
    url = url.strip().rstrip("/").lower()
    # Absolute links pass through untouched.
    if url.startswith("http://") or url.startswith("https://"):
        return url
    # Relative link: join onto host with exactly one slash between them.
    host = host.rstrip("/")
    if not url.startswith("/"):
        url = "/" + url
    return host + url

class URLListener(SGMLParser):
    """SGML parser that harvests anchor hrefs into ``self.urls``.

    Every href that passes checkURL() is normalized via repairURL()
    against *host* (the URL of the page being parsed).
    """

    def __init__(self, host="http://"):
        SGMLParser.__init__(self)
        self.host = host
        self.urls = []

    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs; keep the first
        # usable href of this anchor and stop looking.
        for attr_name, attr_value in attrs:
            if attr_name.lower() == "href" and checkURL(attr_value):
                self.urls.append(repairURL(self.host, attr_value))
                break

def getURL(url):		# 获取网页中的所有url地址, 有些链接还是有问题的
    try:
        listener = URLListener(url)     # 参数url用来修复链接错误
        fp = urllib.urlopen(url)
        listener.feed(fp.read())
    except:
        print "sorry, got something wrong in", url
    return listener.urls
    
def bfs(base_url, maxcnt):	# 广度搜索，抓取界面
    socket.setdefaulttimeout(1)     # timeout = 1s
    cnt = 0
    s = set()
    q = Queue()
    q.put(base_url)
    while(not q.empty() and cnt != maxcnt):
        url = q.get()
        print link2ip(url), url
        if xdb.hasVisited(url):
            print 'vis'
            xdb.addCount(url)
            continue
        elif xdb.exist(url):
            print 'update'
            xdb.updateRecord(url, "baidu", "html")
        else:
            print 'add'
            xdb.addRecord(url, "baidu", "html")

        for u in getURL(url):
            q.put(u)
        else:
            cnt += 1
        

if __name__ == "__main__":
    print '==== start spider ===='
    #bfs("http://www.baidu.com", 300)
    bfs("http://www.cumt.edu.cn", 300)  # crawl up to 300 pages from this seed
    




