#!/usr/bin/env python
# coding=utf-8
'''
Created on 2010-7-27

@author: Jason.q.yan
'''
import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import math
import urllib2
from Route import *

# Pre-compiled patterns for scraping imytour.com listing/detail pages.
# All use re.X (VERBOSE: literal whitespace in the pattern is ignored, hence
# the explicit \s escapes) and re.S (DOTALL).

# Pager text "page X of Y": group(1) = current page, group(2) = total pages.
pageNo_re = re.compile(r'''
        页次：<strong>(\d+)</strong>/<strong>(\d+)</strong>
        ''', re.X|re.S)

# One listing-page fragment per route: everything inside a list_content div.
routeDiv_re = re.compile(r'''
		<div\sclass="list_content">([\s\S]+?)</dl>\s+</div>
        ''', re.X|re.S)
 
# First href inside a route fragment — the route's detail-page URL.
routeUrl_re = re.compile(r'''
		<a\s*href="([^"]+)
        ''', re.X|re.S)
# Route category from the detail URL path: group(1) is the first path
# segment (e.g. "free" — see the type check in do_crawl).
routeDiv_type = re.compile(r'''
        <a\s*href="http://www.imytour.com/([\s\S]+?)/(\S+?)
        ''', re.X|re.S)
# One detail-page fragment per itinerary day (pidinfo_cont_<n> divs).
schedule_re =  re.compile(r'''
        <div\sclass="xxnr_content"\s\sid="pidinfo_cont_\d"\sstyle="\sdisplay:\s*\S+\s">([\s\S]+?)\s+?</div>\s+?</div>
        ''', re.X|re.S)
#schedule_detail =  re.compile(r'''<div class="xxnr_content_tu">[\s\S]+?</div>''', re.X|re.S) 
                         
# (label, listing-URL template) pairs; the template takes a page number via %d.
server_urls = [('自由行','http://www.imytour.com/product/search_list.php?page=%d',)
			]

def do_crawl():
    """Crawl every route listing on imytour.com and persist the results.

    For each (vacation_type, url_template) in server_urls: fetch page 1 to
    read the total page count, walk every listing page, parse each route's
    summary fields out of its HTML fragment, fetch the route's detail page,
    delegate per-day schedule parsing to do_parse(), then save the route via
    route.updRoute() / route.updRoute_Schedules().

    Any exception anywhere aborts the entire crawl: the bare except at the
    bottom only prints the traceback.
    """
    try:
        for vacation_type, server_url in server_urls:
            html = urllib2.urlopen(server_url % 1).read()
            # Total page count from the pager ("page X of Y"); group(2) is Y.
            pageTotal = int(pageNo_re.search(html).group(2))
            # Visit every listing page.

            for pageNo in range(1,pageTotal+1):
                # Grab the HTML fragment describing each route on this page.
                html = urllib2.urlopen(server_url % pageNo).read()
                route_list = routeDiv_re.findall(html)
                #print route_list
                # Parse each route fragment.
                for div in route_list:
                    route = Route()
                    route.site = 52  # NOTE(review): presumably this supplier's id in the routes DB — confirm
                    route.supplier_url = 'http://www.imytour.com/'
                    route.supplier = "迈途旅行网"
                    route.go_t = route.back_t = "-"
                    route.title = re.compile(r'''&nbsp;\s([\s\S]+?)<span>''', re.X|re.S).search(div).group(1)
                    route.price = re.compile(r'''￥(\d+)起''', re.X|re.S).search(div).group(1)

                    route.outcity = re.compile(r'''出发地：</b><font\scolor="\S+?">([\s\S]+?)</font></dd>''', re.X|re.S).search(div).group(1)
                    # Route description; ([\s\S]+)* can match empty, so the
                    # captured group may be None — hence the if-guard below.
                    detail = re.compile(r'''线路简介：</b>([\s\S]+)*</dd>''', re.X|re.S).search(div).group(1)
                    if detail:
                      route.detail =detail
                    # Image src is site-relative ("../..."); prepend the site root.
                    route.img_url= route.supplier_url+re.compile(r'''<dt><a\shref="\S+?"><img\s*src="../([^"]+)''', re.X|re.S).search(div).group(1)
                    route.telphone="400-672-9993"
                    #route.days = re.compile(r'''行程天数：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    #route.dest = re.compile(r'''途径城市：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    route.rout_url = routeUrl_re.search(div).group(1)
                    # NOTE(review): `type` shadows the builtin; safe here but rename when touching this code.
                    type = routeDiv_type.search(div).group(1)
                    if (type=="free"):
                        route.type = "自由行"
                    else:
                        route.type = "常规线路"
                    date_result = re.compile(r'''出团日期：</b><font\scolor="\S+?">([\s\S]+?)</font></dd>''', re.X|re.S).search(div)
                    # NOTE(review): the condition below compares a value to itself and is
                    # always True, so the else branch never runs — which also hides that
                    # it would crash: this pattern has only one group, so group(2) does
                    # not exist. The intended two-branch logic is unrecoverable from here.
                    date = str(datetime.date.today().year)+"-"+date_result.group(1).strip() if date_result.group(1).strip() == date_result.group(1).strip() else date_result.group(1).strip() +date_result.group(2).strip()+";"
                    # Normalize "MM-DD, MM-DD, ..." into ";"-separated dates
                    # prefixed with the current year; strip trailing ellipsis.
                    date =date.replace(", ", ";"+str(datetime.date.today().year)+"-")
                    date =date.replace(",", "")
                    date =date.replace("...", "")
                    route.dates = date
                    # Fetch the route's detail page by its URL.
                    temp = urllib2.urlopen(route.rout_url).read()
                    # Parse the per-day schedules out of the detail page.
                    do_parse(temp,route)
                    print >>sys.stderr, 'updating', route.supplier, route.title
                    route.updRoute()
                    route.updRoute_Schedules()

    except:
        # NOTE(review): format_exc takes a *limit* argument, not an exc_info
        # tuple — passing sys.exc_info() here is a misuse; TODO confirm & fix.
        print traceback.format_exc(sys.exc_info())

# Parse each day's itinerary out of a route detail page.
def do_parse(html,route):
    """Extract per-day Schedule entries from a route detail page.

    html  -- detail-page HTML (str); day fragments are located by the
             module-level schedule_re.
    route -- Route instance; parsed Schedule objects are appended to
             route.schedules in document order. Returns None.
    """
    # Compile once, outside the per-day loop.
    day_re = re.compile(r'''第(\d+)天''', re.X|re.S)
    dest_re = re.compile(r'''第(\d+)天\s([\s\S]+?)\s?-\s?([\s\S]+?)</td>''', re.X|re.S)
    brief_re = re.compile(r'''<div\sclass="wen">([\s\S]+?)</div>''', re.X|re.S)
    eat_re = re.compile(r'''用餐:\s*<font\scolor="\S+?">([\s\S]+?)</font>''', re.X|re.S)

    for parthtml in schedule_re.findall(html):
        schedule = Schedule()
        # Day number ("第N天").
        schedule.s_num = day_re.search(parthtml).group(1)
        dest_temp = dest_re.search(parthtml)
        if dest_temp:
            # Departure place of the "from - to" pair.
            schedule.s_place = dest_temp.group(2)
        brief_temp = brief_re.search(parthtml)
        if brief_temp:
            schedule.s_brief = brief_temp.group(1)
        # Guard against pages without a meals line: the original dereferenced
        # the match unconditionally and an AttributeError aborted the crawl.
        eat = eat_re.search(parthtml)
        if eat:
            meals = str(eat.group(1))
            if meals.count('早') > 0:
                schedule.s_brf = '1'
            # BUG FIX: the original wrote `count('午')>0|count('中')>0`, which
            # Python parses as the chained comparison a > (0|b) > 0, i.e.
            # "a > b and b > 0" — lunch was almost never flagged. The intent
            # is clearly "either character present".
            if meals.count('午') > 0 or meals.count('中') > 0:
                schedule.s_lunch = '1'
            if meals.count('晚') > 0:
                schedule.s_dinner = '1'
        route.schedules.append(schedule)
    return

if __name__ == '__main__':
    # Run from the script's own directory so any relative paths resolve.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    do_debug = False

    # Parse command-line flags; bail out with usage on malformed options.
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
    except getopt.GetoptError:
        usage()
        sys.exit()

    for flag, value in opts:
        if flag in ('-h', '--help'):
            usage()
            sys.exit()
        elif flag in ('-d', '--debug'):
            do_debug = True
        elif flag in ('-n', '--no-cache'):
            use_cache = False
        elif flag in ('-o', '--only-cache'):
            # Anything other than an explicit "no" answer enables only-cache.
            only_cache = value.lower() not in ('no', 'n', '0')
        elif flag in ('-p', '--provider'):
            pass  # provider selection not implemented

    # Drop into the debugger just before crawling when -d/--debug was given.
    if do_debug:
        import pdb
        pdb.set_trace()
    do_crawl()
