#coding=utf-8

import MySQLdb
import datetime
import hashlib
import urllib2
from BeautifulSoup import BeautifulSoup, SoupStrainer
import re
import threading
from StringIO import StringIO
from Queue import Queue
import time
import socket

class DBManager(object):
    """URL bookkeeping for the crawler, backed by three MySQL tables:
    geturl (crawl frontier), getcontent (content pages to parse) and
    usedurl (MD5 de-duplication set)."""

    # Single class-level connection shared by all instances.
    # NOTE(review): MySQLdb connections are not thread-safe, and this class
    # is used from multiple threads -- consider one connection per thread.
    conn = MySQLdb.connect(host="localhost", user="root", passwd="123456", \
    db="fetchcomic",use_unicode=1, charset='utf8')

    def insertConetntUrl(self,url):
        """Queue a content (comic book) page URL for later parsing."""
        sql = "INSERT INTO getcontent(url, createtime) VALUES(%s, %s)"
        param = (url, str(datetime.datetime.now()))
        self.__insertUrl(sql, param)

    def insertGetUrl(self,url):
        """Add a navigation URL to the crawl frontier."""
        sql = "INSERT INTO geturl(url, times, createtime) VALUES(%s, %s, %s)"
        param = (url, 1, str(datetime.datetime.now()))
        self.__insertUrl(sql, param)

    def insertMd5key(self,url):
        """Record the URL's MD5 so the URL is never crawled twice."""
        sql = "INSERT INTO usedurl(md5key) VALUES(%s)"
        m = hashlib.md5(url)
        # BUG FIX: (x) is just x, not a tuple -- cursor.execute() would treat
        # the 32-char digest string as 32 separate parameters. Use (x,).
        param = (m.hexdigest(),)
        self.__insertUrl(sql, param)

    def __insertUrl(self,sql,param):
        # Execute one parameterized INSERT and commit it immediately.
        cursor = DBManager.conn.cursor()
        cursor.execute(sql, param)
        DBManager.conn.commit()

    def uniqueUrl(self, url):
        """Return True when the URL's MD5 is not yet present in usedurl."""
        m = hashlib.md5(url)
        cursor = DBManager.conn.cursor()
        # FIX: parameterized query instead of string concatenation
        # (SQL-injection safe, and lets the driver handle quoting).
        sql = "SELECT id FROM usedurl where md5key = %s"
        cursor.execute(sql, (m.hexdigest(),))
        return not cursor.fetchall()

    def queryUsedUrl(self,size):
        """Fetch up to `size` frontier URLs, oldest first."""
        # int() validates the caller-supplied size (accepts "100" or 100)
        # and keeps the statement free of raw string concatenation.
        sql = "SELECT url FROM geturl order by id limit %d" % int(size)
        return self.__queryUrl(sql)

    def queryContentUrl(self,size):
        """Fetch up to `size` pending content URLs, oldest first."""
        sql = "SELECT url FROM getcontent order by id limit %d" % int(size)
        return self.__queryUrl(sql)

    def __queryUrl(self, sql):
        # Run a SELECT and return all rows as a tuple of tuples.
        cursor = DBManager.conn.cursor()
        cursor.execute(sql)
        return cursor.fetchall()

    def removeConetntUrl(self,url):
        """Remove a content URL once it has been processed."""
        # BUG FIX: rows are inserted into `getcontent` but were deleted from
        # a different table (`contenturl`), so processed URLs never left the
        # queue.
        sql = "delete from getcontent where url = %s"
        self.__removeUrl(sql, (url,))

    def removeGetUrl(self,url):
        """Remove a frontier URL once its links have been extracted."""
        sql = "delete from geturl where url = %s"
        self.__removeUrl(sql, (url,))

    def __removeUrl(self,sql,param):
        cursor = DBManager.conn.cursor()
        cursor.execute(sql, param)
        # BUG FIX: deletes were never committed, so crawled URLs were
        # re-selected and fetched again on the next pass.
        DBManager.conn.commit()

class GetAllUrl(threading.Thread):

    def __init__(self,matchUrl,conetnteUrl,filterUrl,queue):
        self.matchUrl = matchUrl
        self.conetnteUrl = conetnteUrl
        self.filterUrl = filterUrl
        self.user_agent = 'User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; \
                Windows NT 5.1; SV1; CNCDialer; .NET CLR 2.0.50727; .NET CLR \
                3.0.04506.648; .NET CLR 3.5.21022)'
        threading.Thread.__init__(self)
        self.data = queue
        self.timeout = 5

    def __filterLink(self,link):
        for url in self.filterUrl:
            if url in link:
                return False
        return True

    def run(self):
        socket.setdefaulttimeout(self.timeout)
        headers = {'User_Agent':self.user_agent}
        while not self.data.empty():
            url = self.data.get()
            print "fetch url = %s"%url
            html = ''
            req = urllib2.Request(url,headers=headers)
            try:
                res = urllib2.urlopen(req)
                html = StringIO(res.read())
            except socket.timeout:
                print "socket timeout"
                continue
            except socket.error,e:
                print e.reason
            except urllib2.URLError,e:
                print e.reason
            except urllib2.HTTPError,e:
                print e.reason
            print "urllib ok"
            if html:
#                print res.read()
                links = SoupStrainer('a',href=re.compile('^'+self.matchUrl))
                soup = BeautifulSoup(html,parseOnlyThese=links)
                print "bs ok"
                db = DBManager()
                db.removeGetUrl(url)
                start = datetime.datetime.now()
                for a in soup:
                    link = a['href']
                    #starttime = datetime.datetime.now()
                    flag = db.uniqueUrl(link)
                    #endtime = datetime.datetime.now()
                    #print "db unique url usedtime:%s"%str((endtime - starttime).seconds)
                    if flag:
                        db.insertMd5key(link)
                        if self.conetnteUrl in link:
                            db.insertConetntUrl(link)
                        else:
                            if self.__filterLink(link):
                                db.insertGetUrl(link)
                end = datetime.datetime.now()
                print "db usedtime:%s"%str((end - start).seconds)
            else:
                print "html null"
            time.sleep(1)

class GetContentUrl(threading.Thread):
    
    def __init__(self):
        threading.Thread.__init__(self,queue)
        self.user_agent = 'User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; \
        Windows NT 5.1; SV1; CNCDialer; .NET CLR 2.0.50727; .NET CLR \
        3.0.04506.648; .NET CLR 3.5.21022)'
        self.data = queue
        self.timeout = 5

    def run(self):
        socket.setdefaulttimeout(self.timeout)
        headers = {'User_Agent':self.user_agent}
        while not self.data.empty():
            url = self.data.get()
            print "fetch url = %s"%url
            html = ''
            req = urllib2.Request(url,headers=headers)
            try:
                res = urllib2.urlopen(req)
                html = StringIO(res.read())
            except socket.timeout:
                print "socket timeout"
                continue
            except socket.error,e:
                print e.reason
            except urllib2.URLError,e:
                print e.reason
            except urllib2.HTTPError,e:
                print e.reason
            print "urllib ok"
            if html:
                soup = BeautifulSoup(html)
                comicInfo = soup.find('div',{'class':'box-720 newMov'})
                print "bs ok"
                db = DBManager()
                db.removeConetntUrl(url)
                print comicInfo.renderContents()
            else:
                print "html null"
            time.sleep(1)

class getUrlByProxy(object):
    def __init__(self,name,pwd,ip,port,timeout=5):
        self.name = name
        self.pwd = pwd
        self.ip = ip
        self.port = port
        self.timeout = timeout
        self.user_agent = 'User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; \
        Windows NT 5.1; SV1; CNCDialer; .NET CLR 2.0.50727; .NET CLR \
        3.0.04506.648; .NET CLR 3.5.21022)'
        self.headers = {'User_Agent':self.user_agent}
        self.proxy = "http://%s:%s@%s:%d"%(self.name,self.pwd,self.ip,self.port)
        
    def fetch(self,url):
        proxyHandler = urllib2.ProxyHandler({'http':proxyUrl})
        opener = urllib2.build_opener(proxyHandler)
        urllib2.install_opener(opener)        
        req = urllib2.Request(url,headers=self.headers)
        html = ''
        try:
            res = urllib2.urlopen(req)
            html = StringIO(res.read())
        except socket.timeout:
            print "socket timeout"
        except socket.error,e:
            print e.reason
        except urllib2.URLError,e:
            print e.reason
        except urllib2.HTTPError,e:
            print e.reason
        finally
            return html
    
if __name__=="__main__":
    baseUrl = "http://anime.xunlei.com/mh"
    matchUrl = "http://anime.xunlei.com/"
    conetnteUrl = "http://anime.xunlei.com/book/"
    filterUrl = ['http://anime.xunlei.com/contents',\
    'http://anime.xunlei.com/vod','http://anime.xunlei.com/detail',\
    'http://anime.xunlei.com/guide']
    db = DBManager()
    db.insertMd5key(baseUrl)
    db.insertGetUrl(baseUrl)
    while True:
        links = db.queryUsedUrl("100")
        if not links:
            print "no data"
            break
        queue = Queue()
        for link in links:
            queue.put(link[0])
        start = datetime.datetime.now()
        for i in range(10):
            t = GetAllUrl(matchUrl,conetnteUrl,filterUrl,queue)
            t.start()
            t.join()
        end = datetime.datetime.now()
        print "one url usedtime=%s"%str((end - start).seconds)
    print 'get url finish'