#!/usr/bin/python

import re, string, sys, httplib, urllib2, os, tagpot
import MySQLdb, time, datetime
from xml.dom import minidom

# Shared module-level MySQL cursor (opened by the project's tagpot helper);
# every function below executes its queries through this single cursor.
cursor = tagpot.dbConnect()

#read url from db and return array
def rssArray():
    """Return the list of feed URLs stored in the `cts` table.

    Each fetched row is a 1-tuple; only the url column is kept.
    """
    cursor.execute("""select xml from cts""")
    # list comprehension replaces the manual loop-and-append
    return [row[0] for row in cursor.fetchall()]

#read site and return item list
def readURL(url):
    """Fetch `url`, parse it as RSS and return its <item> DOM nodes.

    On any fetch/parse failure a message is printed and an empty list
    is returned, so callers can iterate the result unconditionally
    (the previous version silently returned None, which made callers
    crash on iteration).
    """
    try:
        request = urllib2.Request(url)
        opener = urllib2.build_opener()
        feed = opener.open(request).read()
        xml = minidom.parseString(feed)
        return xml.getElementsByTagName("item")
    except Exception:
        # was a bare except:; keep the best-effort behavior but do not
        # swallow SystemExit/KeyboardInterrupt
        print "readURL error"
        return []


# get Video Info xml from YouTube 
def xmlAccess(id):
    """Fetch the YouTube details XML for video `id`.

    Returns the first <video_details> DOM element, or the string 'no'
    (legacy sentinel expected by videoWrite) when the response carries
    no such element.
    """
    APIURL = "http://www.youtube.com/api2_rest?method=youtube.videos.get_details&dev_id=ZyogdbWOQHc&video_id="
    request = urllib2.Request(APIURL + id)
    opener = urllib2.build_opener()
    info = opener.open(request).read()
    x = minidom.parseString(info)
    # query the DOM once instead of twice
    details = x.getElementsByTagName("video_details")
    if details:
        return details[0]
    return 'no'

def detailAccess(xml, i):
    """Return the text content of the first child element named `i` of `xml`."""
    node = xml.getElementsByTagName(i)[0]
    return node.firstChild.data

def literal(val):
    """Quote `val` for inline SQL.

    Falsy values (None, "") become the bare keyword NULL; anything else
    is wrapped in single quotes, with embedded single quotes replaced
    by '^' as a crude escaping scheme.
    """
    if not val:
        return "NULL"
    return "'" + val.replace("'", "^") + "'"

# write video data to db
def videoWrite(d, tag, link):
    try:
        # get Youtube ID
        id = d.replace("http://www.youtube.com/v/","")
        id = id.split('&')[0]
    
        # from YouTube
        xml = xmlAccess(id)
        if xml != 'no':
            tags_video = detailAccess(xml,"tags")
            count_video = detailAccess(xml, "rating_count")
            
            rating_avg = detailAccess(xml, "rating_avg")
            chList = xml.getElementsByTagName("channel_list")[0]
            # only first channel
            ch = detailAccess(chList, "channel")
            title = detailAccess(xml, "title")
            length = detailAccess(xml, "length_seconds")
            img = detailAccess(xml, "thumbnail_url")
            count_view = detailAccess(xml, "view_count")
            embed = detailAccess(xml, "embed_status")
            today = datetime.date.today()
            
            # tag formatting
            tags_video = tags_video.replace(" ", ",")
            
            # from rss database
            tags_rss = tag
            link_rss = link
            qu = "insert into anc (anchor, ko, en, ref, count,date, rating_avg, ch, title, length, img, count_view, embed) values ('%s', %s, %s, '%s', '%s', '%s', '%s', '%s', %s, '%s', '%s', '%s', '%s')" %  (id, literal(tags_rss), literal(tags_video), link_rss, count_video, today, rating_avg, ch, literal(title), length, img, count_view, embed)
            cursor.execute(qu)
        else:
            qu = "insert into anc (ok) values ('0')"
            cursor.execute(qu);
            print "no video: "+id
    except:
        print "video write error"

#item parsing and insert it to db
def itemProc(item):
    """Parse one rss <item> DOM node, store it in the `rss` table, and
    index any embedded YouTube video via videoWrite().

    Items whose link is already present in `rss` are skipped.
    """
    title = ""
    link = ""
    dsc = ""
    tagList = ""
    hit = 0
    # BUG FIX: this was assigned to the misspelled name 'tilte', so
    # `title` stayed "" and every row stored NULL for its title
    title = item.getElementsByTagName("title")[0].firstChild.data
    link = item.getElementsByTagName("link")[0].firstChild.data
    #dsc     = item.getElementsByTagName("description")[0].firstChild.data
    tag = item.getElementsByTagName("category")
    for i in tag:
        tagList = i.firstChild.data + "," + tagList
    date = item.getElementsByTagName("pubDate")[0].firstChild.data

    # check duplication (query parameters belong in a tuple)
    cursor.execute("""select link from rss where link=%s""", (link,))
    if not cursor.fetchall():
        # embedded video search: fetch the page and scan for a YouTube <param>
        request = urllib2.Request(link)
        opener = urllib2.build_opener()
        # was bound to 'str', shadowing the builtin
        page = opener.open(request).read()
        dsc = page
        scan_youTube = re.compile ('<param .* value=\\\\\"(http://www.youtube.com/[^>]*)\\\\\">[^<>]*</param>')
        out = scan_youTube.search(dsc)
        if out:
            hit = hit + 1
            ok = out.group(1)
            #print "\t"+ok
            cursor.execute("""insert into rss (title, tag, link, date, content, hit) values (%s, %s, %s, %s, %s,%s)""", (literal(title), tagList, link, date, literal(ok), hit))
            videoWrite(ok, tagList, link)
        else:
            cursor.execute("""insert into rss (title, tag, link, date, hit) values (%s, %s, %s, %s, %s)""", (literal(title), tagList, link, date, hit))
    #else:
    #    print "duplicated: "+link

##################################################
list = rssArray()
for d in list:
    try:
        items = readURL(d)
        print d
        for i in items:
            itemProc(i)
    except:
        continue
###################################################


        
