#!/usr/bin/env python
# coding=utf-8
'''
Created on 2010-08-02

@author: Jason.q.yan
'''
import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import math
import urllib2
from Route import *

# Pager marker "页次：<strong><font color=red>X</font>/Y</strong>":
# group(1) = current page number, group(2) = total number of pages.
pageNo_re = re.compile(r'''
        页次：<strong><font\scolor=red>(\d+?)</font>/(\d+?)</strong>
        ''', re.X|re.S)

# One route row on a list page: from the "lx_td" cell up to the closing
# </tr>; the captured fragment is parsed further inside do_crawl().
routeDiv_re = re.compile(r'''
        <tr>\s+?<td\sclass="lx_td">([\s\S]+?)</tr>
        ''', re.X|re.S)
 
# First href value inside a fragment (relative detail-page URL).
routeUrl_re = re.compile(r'''
        <a\s*href="([^"]+)
        ''', re.X|re.S)
# Splits an absolute imytour.com link into (section, rest).
# NOTE(review): not referenced anywhere in this file — possibly kept for
# another crawler; confirm before removing.
routeDiv_type = re.compile(r'''
        <a\s*href="http://www.imytour.com/([\s\S]+?)/(\S+?)
        ''', re.X|re.S)
# "行程参考" (itinerary reference) cell on a detail page; group(1) is the
# itinerary HTML from which route.detail / route.days are derived.
schedule_re =  re.compile(r'''
        <td\sclass="show_routes"><b>行程参考</b></td>\s+?<td>([\s\S]+?)</td>
        ''', re.X|re.S)
#schedule_detail =  re.compile(r'''<div class="xxnr_content_tu">[\s\S]+?</div>''', re.X|re.S) 
                         
# (label, url-template) pairs; each template takes a page number via '%d'.
# Both labels are "自由行"; the two templates cover the domestic (gn) and
# outbound (gw) route lists.
server_urls = [
               ('自由行','http://www.nbkhlxs.com/routes_gn.asp?page=%d'),
               ('自由行','http://www.nbkhlxs.com/routes_gw.asp?page=%d')
              ]

def do_crawl():
    try:
        for vacation_type,server_url in server_urls:
            html = urllib2.urlopen(server_url % 1).read().decode('gb2312','ignore').encode('utf-8')
            #获取总线路数算出总页数
            pageTotal = int(pageNo_re.search(html).group(2))
            #根据总页数，遍历所有线路列表页
            for pageNo in range(1,pageTotal+1):
                #获取所有描述线路的HTML片段 
                html = urllib2.urlopen(server_url % pageNo).read().decode('gb2312','ignore').encode('utf-8')
                route_list = routeDiv_re.findall(html)
                #遍历，解析
                for div in route_list:
                    route = Route()
                    route.site = 36
                    route.supplier_url = 'http://www.nbkhlxs.com/'
                    route.supplier = "宁波康辉"
                    route.go_t = route.back_t = "-"
                    route.rout_url= route.supplier_url+re.compile(r'''<a\shref="([\s\S]+?)">\s?([\s\S]+?)</a>''', re.X|re.S).search(div).group(1)
                    route.title =re.compile(r'''<a\shref="([\s\S]+?)">\s?([\s\S]+?)</a>''', re.X|re.S).search(div).group(2)
                    route.telphone="0574-87300028"
                    if re.compile(r'''￥(\d+)''', re.X|re.S).search(div):
                        route.price = re.compile(r'''￥(\d+)''', re.X|re.S).search(div).group(1)
                    go_date=re.compile(r'''<td>([\s\S]+?)</td>''', re.X|re.S).search(div).group(1)
                    if str(go_date).count("td")==0 |(str(go_date).count("月") | str(go_date).count("日"))==0:
                        route.dates=go_date
                    #根据URL获取线路详细页面的HTML
                    temp = urllib2.urlopen(route.rout_url).read().decode('gb2312','ignore').encode('utf-8')
                    #进一步解析 
                    do_parse(temp,route)
                    print >>sys.stderr, 'updating', route.title
                    route.updRoute()
                    #route.updRoute_Schedules()
    except:
        print traceback.format_exc(sys.exc_info())

# Parse the per-day itinerary from a route's detail page.
def do_parse(html, route):
    """Fill in outcity/detail/days on *route* from a detail-page's HTML.

    html  -- detail page as a utf-8 encoded str
    route -- Route instance to enrich; .outcity is always set, .detail and
             .days only when the corresponding markup is found.
    """
    # Departure-city extraction from the page was abandoned (the site's
    # markup was unreliable); every route on this site departs from Ningbo.
    route.outcity = '宁波'
    # "行程参考" (itinerary) cell: one pattern serves both the full detail
    # text and the per-day scan below (the original compiled the identical
    # pattern twice, plus the day pattern twice per fragment).
    itinerary_re = re.compile(
        r'''<td\sclass="show_routes"><b>行程参考</b></td>\s+?<td>([\s\S]+?)</td>''',
        re.X | re.S)
    day_re = re.compile(r'''第(\S+)天''', re.X | re.S)
    m = itinerary_re.search(html)
    # Guard the match: pages without an itinerary cell previously raised
    # AttributeError on .group(1) before the `if detail:` check ran.
    if m:
        route.detail = m.group(1)
    for parthtml in itinerary_re.findall(html):
        # Count the "第N天" (day N) markers to derive the trip length.
        days_list = day_re.findall(parthtml)
        if days_list:
            route.days = str(len(days_list))
    return

if __name__ == '__main__':
    # Run relative to the script's own directory so any relative data /
    # cache paths resolve regardless of the caller's cwd.
    script_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(script_path)
    do_debug = False

    try:
        # Fix: '--only-cache' and '--provider' take a value (their short
        # forms are declared as 'o:' and 'p:'), so the long forms need a
        # trailing '=' -- without it getopt rejected '--only-cache=yes'.
        opts, args = getopt.gnu_getopt(
            sys.argv[1:], 'hdno:p:',
            ['help', 'debug', 'no-cache', 'only-cache=', 'provider='])
    except getopt.GetoptError:
        # NOTE(review): usage() is not defined in this file; presumably it
        # comes in via `from Route import *` -- verify.
        usage()
        sys.exit()

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt in ('-d', '--debug'):
            do_debug = True
        elif opt in ('-n', '--no-cache'):
            use_cache = False   # assigned but not read here -- TODO confirm use
        elif opt in ('-o', '--only-cache'):
            # Any value other than no/n/0 enables only-cache mode.
            only_cache = arg.lower() not in ('no', 'n', '0')
        elif opt in ('-p', '--provider'):
            pass  # accepted for compatibility; currently unused

    if do_debug:
        # Drop into the debugger before the crawl starts.
        import pdb
        pdb.set_trace()
    do_crawl()
