#!/usr/bin/python
# -*- coding: utf-8 -*-

import re
import string
import socket
import urllib
import MySQLdb

# Cap every blocking socket operation (urlopen, MySQL) at 10 seconds so a
# stalled remote host cannot hang the crawler forever.
socket.setdefaulttimeout(10)

# Youku category ids to crawl: variety shows, TV dramas, anime.
serial_category = ('c_85', 'c_97', 'c_100')

# Connect to the MySQL database holding the crawl results.
# NOTE(review): host/user/password are hard-coded in source — move to a
# config file or environment variables before sharing this script.
conn = MySQLdb.Connection("172.16.7.58", "root",  "researchsucks", "X", 3306)
conn.set_character_set("utf8");
cur = conn.cursor()

# Matches one serial entry on a category listing page:
# group 1 = show id, group 2 = title, group 3 = thumbnail URL.
serial_url_pattern = r'<li class="p_link"><a href="http://www.youku.com/show_page/id_(.*?).html" title="(.*?)"[\s\S]*?<li class="p_thumb"><img src="(.*?)"'
# Matches pagination links on a show page; group 1 = page number
# (\d*? is lazy, so the group may match the empty string).
page_count_pattern = r'_type_pic_from_ajax_page_(\d*?).html'
# Matches one episode entry on an episode-list page:
# group 1 = thumbnail, group 2 = duration, group 3 = video id, group 4 = title.
cur_serial_url_pattern = r'<li class="v_thumb"><img src="(.*?)"[\s\S]*?<li class="v_time"><span class="num">(.*?)</span><span class="bg"></span></li>[\s\S]*?<li class="v_title">[\s\S]*?href="http://v.youku.com/v_show/id_(.*?).html" target="video">(.*?)</a>'

def GetPageCount(url):
    """Return the number of episode-list pages linked from a show page.

    Fetches *url*, scans it for "..._type_pic_from_ajax_page_N.html"
    pagination links and returns the largest N found, or 1 when the page
    has no pagination links at all.
    """
    pn = 1
    page = urllib.urlopen(url).read()
    for match in re.finditer(page_count_pattern, page):
        digits = match.group(1)
        # page_count_pattern uses a lazy \d*?, so the group can legally be
        # empty; int('') would raise ValueError, so skip those matches.
        # int() replaces the deprecated string.atoi() (same behavior).
        if digits:
            pn = max(pn, int(digits))
    return pn

def GetNoFromTitle(title):
    """Extract an episode number (as a string) from a video title.

    Tries progressively looser patterns: digits preceded by a space,
    digits followed by a space, then any digit run.  Returns the
    sentinel "9999999" (sorts last) when the title has no digits.
    """
    for pattern in (r' (\d+)', r'(\d+) ', r'(\d+)'):
        found = re.search(pattern, title)
        if found:
            return found.group(1)
    return "9999999"

def CrawlCurSerial(url, serial_id):
    """Fetch one episode-list page and insert every episode it contains.

    url       -- ajax episode-list page URL for one serial
    serial_id -- the Youku show id the episodes belong to
    """
    page = urllib.urlopen(url).read()
    for match in re.finditer(cur_serial_url_pattern, page):
        imagelink = match.group(1)
        duration = match.group(2)
        video_id = match.group(3)  # renamed from `id` to avoid shadowing the builtin
        title = match.group(4)
        no = GetNoFromTitle(title)
        # Parameterized query: title/imagelink come from crawled HTML and
        # may contain quotes, so never splice them into the SQL text.
        # MySQLdb uses %s placeholders and escapes the values itself.
        cur.execute(
            "insert ignore into serial_video"
            "(id, title, imagelink, serial_no, serial_id, duration) "
            "values(%s, %s, %s, %s, %s, %s)",
            (video_id, title, imagelink, no, serial_id, duration))

def CrawlSerial(serial_id, serial_name, serial_thumb, serial_category):
    """Insert one serial row, then crawl all of its episode-list pages.

    serial_id       -- Youku show id
    serial_name     -- show title as scraped from the listing page
    serial_thumb    -- thumbnail image URL
    serial_category -- category id ('c_85' / 'c_97' / 'c_100')
    """
    # Parameterized query: the name is scraped HTML and may contain quotes,
    # so never build the SQL by string concatenation.
    cur.execute(
        "insert ignore into serial(id, category, name, imagelink) "
        "values(%s, %s, %s, %s)",
        (serial_id, serial_category, serial_name, serial_thumb))
    page_cnt = GetPageCount('http://www.youku.com/show_page/id_' + serial_id + '.html')
    for pn in range(1, page_cnt + 1):
        CrawlCurSerial('http://www.youku.com/show_eplist/showid_' + serial_id + '_type_pic_from_ajax_page_' + str(pn) + '.html', serial_id)

while True:
    for category in serial_category:
        for pn in range(1, 31):
            try:
                url = 'http://www.youku.com/v_olist/' + category + \
                        '_a__s__g__r__lg__im__st__mt__d_1_et_0_fv_0_fl__fc__fe_1_o_7_p_' + \
                        str(pn) + '.html'
                page = urllib.urlopen(url).read()
                for match in re.finditer(serial_url_pattern, page):
                    CrawlSerial(match.group(1), match.group(2), match.group(3),  category)
            except Exception, why:
                print Exception, ':', why

cur.close()
conn.close()
