#!/usr/local/bin/python

# 2009.4
# Yoongu Kim (yoongukim@cmu.edu)

####################################################
# initialization
####################################################
#libraries
import os, re
import urllib
from BeautifulSoup import *
import threading
import xml.dom.minidom
import datetime

####################################################
# misc.
####################################################
#wrap stdout in a utf-8 encoder so toon titles print without UnicodeEncodeError
import codecs
import sys
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

#day-of-the-week lookup: weekday() index -> short english name
weekdays = [ 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun' ]
weekday_dict = dict(enumerate(weekdays))

utc = datetime.datetime.utcnow()        #current UTC time
kt = utc + datetime.timedelta(hours=9)  #Korean time (UTC+9, no DST)
kt_day = weekday_dict[kt.weekday()]     #Korean day of the week

####################################################
# definitions
####################################################
#rss definitions
rss_xml_name = "naver_webtoon.xml"  #output feed file (read and rewritten each run)
feed_url = 'http://feeds2.feedburner.com/NaverWebtoon'  #public address of this feed
rss_max = 30    #maximum items in the feed

#ascii database definitions (plain text files kept under db_path)
db_path = 'db'                  #path of database
db_mapper_name = '_mapper.txt'  #toon id-to-title mapping info file
db_day_name = '_day.txt'        #day of the week recorded by the last run

#url definitions
base_url = 'http://comic.naver.com'     #naver comics site root
top_rel_url = '/weekday/webtoon.nhn'    #top page listing all webtoons
toon_rel_url = '/webtoon/list.nhn'      #per-toon episode list page
epi_rel_url = '/webtoon/detail.nhn'     #individual episode page

#query-string key definitions
tid_query = 'titleId='  #toon id
eid_query = 'no='       #episode id
wkd_query = 'weekday='  #day of the week

####################################################
# gather list of toons from top page of naver comic
####################################################
#fetch the top page that lists every webtoon
top_url = base_url + top_rel_url
top_html = urllib.urlopen(top_url).read()

#parse only the toon anchors (href='#', no id) out of the html tree
top_filter = SoupStrainer('a', href='#', id=None)
top_soup = BeautifulSoup(top_html, parseOnlyThese = top_filter)

#pull toon ids and titles out of each anchor
tid_re = re.compile(tid_query + '([0-9]+)') #regex for toon id

tids = []       #toon ids
ttitles = []    #toon titles
for toon_link in top_soup:
        tids.append(tid_re.search(str(toon_link)).group(1))
        ttitles.append(toon_link.div.img['title'])

#map toon id -> title, dropping duplicate (id, title) pairs
toons = dict(set(zip(tids, ttitles)))

####################################################
# gather list of most recent episodes for toons
####################################################
recent_eids = {}        #most recent episode ids
recent_enames = {}      #most recent episode names
lock = threading.Lock() #mutex

eid_re = re.compile(eid_query + '([0-9]+)') #regex for episode id

#returns the toon's list-page url (where its episode links live)
def get_toon_url(tid):
        return base_url + toon_rel_url + '?' + tid_query + tid

#returns a SoupStrainer matching this toon's episode links
def get_epi_filter(tid):
        href_re = re.compile(epi_rel_url + '\?' + tid_query + tid + '.*' + eid_query)
        return SoupStrainer('a', href = href_re)

#worker thread: fetches one toon's page and records its newest episode
class gather_epi(threading.Thread):
    def __init__(self, tid, ttitle):
        threading.Thread.__init__(self)
        self.tid = tid
        self.ttitle = ttitle

    def run(self):
        #fetch the toon's episode list page
        html = urllib.urlopen(get_toon_url(self.tid)).read()

        #parse only this toon's episode links
        soup = BeautifulSoup(html, parseOnlyThese = get_epi_filter(self.tid))

        #the first matching link is the newest episode
        newest = soup.contents[0]
        eid = eid_re.search(str(newest)).group(1)
        ename = newest.img['title']

        #publish results under the mutex; the dicts are shared by all workers
        lock.acquire()
        recent_eids[self.tid] = eid
        recent_enames[self.tid] = ename
        lock.release()
        #print(recent_eid)

#one worker thread per toon
epi_threads = [gather_epi(tid, ttitle) for tid, ttitle in toons.items()]

#start every worker, then wait for all of them to finish
for worker in epi_threads:
        worker.start()
for worker in epi_threads:
        worker.join()

####################################################
# update ascii db with most recent episodes
####################################################
#chdir to db directory, replacing any plain file squatting on the db path
if not os.path.isdir(db_path):
    if os.path.exists(db_path):
        os.remove(db_path)
    os.mkdir(db_path)

os.chdir(db_path)

#write mapper (toon id-to-title), one tab-separated line per toon
#NOTE: the loop variable was 'map', which shadowed the builtin -- renamed
db_mapper = open(db_mapper_name, 'w')
for tid, ttitle in toons.items():
    entry = tid + '\t' + ttitle + '\n'
    db_mapper.write(entry.encode('utf-8'))
db_mapper.close()

#episodes newly updated since the last run (toon id -> eid / ename)
updated_eids = {}       #episode ids that were newly updated
updated_enames = {}     #episode names that were newly updated
for tid, recent_eid in recent_eids.items():
    db_eid_name = tid + '.txt'

    #skip toons whose stored episode id is already up to date
    if os.path.exists(db_eid_name): #assume that it's not a directory
        db_eid = open(db_eid_name, 'r')
        last_eid = db_eid.readline()
        #close immediately: the old code leaked this handle when an update
        #was needed (it rebound db_eid to the write handle without closing)
        db_eid.close()
        if last_eid != '' and int(recent_eid) <= int(last_eid):
            continue

    #record the update and persist the new episode id
    updated_eids[tid] = recent_eid
    updated_enames[tid] = recent_enames[tid]
    db_eid = open(db_eid_name, 'w')
    db_eid.write(recent_eid)
    db_eid.close()

####################################################
# rss new items
####################################################
#seed the feed with a day-of-the-week header item
rss_items = [(kt_day.capitalize(),
              base_url + top_rel_url + '?' + wkd_query + kt_day,
              kt_day.capitalize())]

#drop the header if today's day was already posted on a previous run
if os.path.exists(db_day_name):
    db_day = open(db_day_name, 'r')
    stored_day = db_day.readline()
    db_day.close()  #the old code leaked this read handle
    if kt_day == stored_day:
        rss_items = []

#number of rss new items
rss_new_max = 0

#new item: day of the week changed -- persist today's day
if rss_items:
    db_day = open(db_day_name, 'w')
    db_day.write(kt_day)
    db_day.close()

    rss_new_max = len(rss_items)

#total number of new rss items, capped at the feed maximum
rss_new_max = rss_new_max + len(updated_eids)
rss_new_max = min(rss_max, rss_new_max)

#nothing new at all (no day change, no updated episodes); exit
if rss_new_max == 0:
    exit()

#new items: one per updated episode, inserted at the front of the feed
for tid, updated_eid in updated_eids.items():
    title = '[' + toons[tid] + '] ' + updated_enames[tid]
    link = (base_url + epi_rel_url
            + '?' + tid_query + tid
            + '&' + eid_query + updated_eid)

    #description is just the title
    rss_items.insert(0, (title, link, title))
    if len(rss_items) == rss_new_max:
        break

#revert to original directory
os.chdir('..')

####################################################
# read old rss
####################################################
if os.path.isfile(rss_xml_name):
    #read old rss (close the handle instead of leaking it)
    rss_file = open(rss_xml_name, 'r')
    rss_xml = rss_file.read()
    rss_file.close()

    #selectively parse xml tree: keep only <item> elements
    rss_filter = SoupStrainer('item')
    rss_soup = BeautifulStoneSoup(rss_xml, parseOnlyThese = rss_filter)

    #how many old items still fit under the feed cap
    rss_old_max = min(rss_max - rss_new_max, len(rss_soup))

    #hoist the tag searches out of the comprehensions -- the old code
    #rescanned the whole tree on every iteration (accidental O(n^2))
    old_titles = rss_soup('title')
    old_links = rss_soup('link')
    old_descriptions = rss_soup('description')

    titles = [old_titles[i].contents[0].strip()
              for i in range(rss_old_max)]
    links = [old_links[i].contents[0].strip()
             for i in range(rss_old_max)]
    descriptions = [old_descriptions[i].contents[0].strip()
                    for i in range(rss_old_max)]

    #append carried-over items after the new ones
    rss_items = rss_items + zip(titles, links, descriptions)

####################################################
# build rss
####################################################
#append an <item> element to parent and fill it from attr_list
def build_item(doc, parent, attr_list):
    item_node = doc.createElement('item')
    parent.appendChild(item_node)
    build_node(doc, item_node, attr_list)

#append one child element per (tag, text) pair in attr_list under parent
def build_node(doc, parent, attr_list):
    for tag_name, text_val in attr_list:
        child = doc.createElement(str(tag_name))
        #text is pre-encoded to utf-8 bytes (python 2 minidom accepts this)
        child.appendChild(doc.createTextNode(text_val.encode('utf-8')))
        parent.appendChild(child)

#assemble an rss 2.0 document from rss_items and write it to rss_xml_name
def build_rss(rss_items):
    doc = xml.dom.minidom.Document()

    rss = doc.createElement('rss')
    rss.setAttribute('version', '2.0')
    doc.appendChild(rss)

    chan = doc.createElement('channel')
    rss.appendChild(chan)

    #channel metadata
    build_node(doc, chan, [('title', 'Naver Webtoon'),
                           ('link', feed_url), 
                           ('description', 'Naver Webtoon RSS Feed by yoongu')])

    #one <item> per feed entry
    for title, link, description in rss_items:
        build_item(doc, chan, [('title', title),
                               ('link', link),
                               ('description', description)])

    #write the xml; close the handle so the file is flushed to disk
    #(the old code never closed it)
    rss_xml = open(rss_xml_name, 'w')
    try:
        doc.writexml(rss_xml, indent = '\t', addindent = '\t', 
                     newl = '\n', encoding = 'utf-8')
    finally:
        rss_xml.close()

build_rss(rss_items)    #emit the final feed file

