#!/usr/bin/python

import urllib2
import re, tagpot, datetime
from xml.dom import minidom

# DB setup: tagpot is a project-local helper module; dbConnect() is
# presumed to return a DB-API cursor (execute/fetchall are called on it
# below) — TODO confirm against tagpot.
cursor = tagpot.dbConnect()


def getRSS(s):
    """Extract a blog RSS feed from one line of scraped page text.

    Checks the line against Egloos, Tistory and Naver blog URL patterns
    (in that order, matching the original precedence) and returns a
    two-element list [rss_xml_url, blog_home_url], or None when the
    line contains no recognized blog URL.
    """
    # Raw strings with escaped dots: previously '.tistory.com' let the
    # dot match ANY character, so junk like 'abcXtistoryXcom' matched.
    p_naver = re.compile(r'http://blog\.naver\.com/([a-zA-Z0-9_-]+)(/.*|)')
    p_tistory = re.compile(r'http://([a-zA-Z0-9_-]+)\.tistory\.com(/.*|)')
    p_egloos = re.compile(r'http://([a-zA-Z0-9_-]+)\.egloos\.com(/.*|)')
    # NOTE(review): Daum blogs were intentionally disabled ("tag
    # problem" in the old dead code); add a p_daum pattern here once
    # that is resolved.

    e = p_egloos.search(s)
    if e is not None:
        blog_id = e.group(1)
        return ["http://" + blog_id + ".egloos.com/index.xml",
                "http://" + blog_id + ".egloos.com"]

    t = p_tistory.search(s)
    if t is not None:
        blog_id = t.group(1)
        return ["http://" + blog_id + ".tistory.com/rss",
                "http://" + blog_id + ".tistory.com"]

    n = p_naver.search(s)
    if n is not None:
        blog_id = n.group(1)
        return ["http://blog.rss.naver.com/" + blog_id + ".xml",
                "http://blog.naver.com/" + blog_id]

    return None
        
def readURL(url):
    """Open *url* over HTTP and return the response file-like object."""
    return urllib2.urlopen(url)

# Seed pages to scan for blog links (meta-blog portals and
# "best blogger" listing pages).
urls = [
    "http://www.hanrss.com/paper/",
    "http://allblog.net",
    "http://tistory.com",
    "http://www.egloos.com/",
    "http://www.egloos.com/index.php?page=1&period=&recomm=100",
    "http://blog.daum.net/_blog/_top/_sub3/bestbloggerlist.do?_top_blogtop=navi_bestblog",
    "http://www.blogkorea.net/bnmsvc/blcoTop130.do",
    "http://blogplus.joins.com/",
    "http://kr.openblog.com/",
    "http://section.blog.naver.com/TodaysScrapList.nhn",
    "http://www.3fishes.co.kr/share/",
    "http://eolin.com",
    "http://www.colcol.net/",
    "http://kr.openblog.com/",
]




a = 'dd'
for i in urls:
    d = readURL(i)
    while a != '':
        a = d.readline()
        b = getRSS(a)
        if b is not None:
            cursor.execute("select url,hit from cts where url=%s",b[1])
            ok = cursor.fetchall()
            
            hit = 0
            for r in ok:
                hit = r[1]+1
            
            today = datetime.date.today()
            if hit is 0:
                q = "insert into cts(url, xml, hit, last) values ('%s', '%s', 0, '%s')" % (b[1], b[0], today)
            else:
                q = "update cts SET hit = %d, last = '%s' where url='%s'" % (hit, today,  b[1])
            print q
            cursor.execute(q)
    a = 'dd'