#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import math
import urllib2
from Route import *

# Listing-page URL; the two %d slots are (search category id, page number).
url_template = "http://www.actrip.com/packsearch.aspx?pt1=%d&pageno=%d"

# Anchor in a result row pointing at the route's /pack/packNNN.shtml detail page.
url_re = re.compile(r'''
             <a\shref="(/pack/pack\d+\.shtml)"[\s\S]+?>[\s\S]+?</tr>
             ''', re.X|re.S)

# One complete <tr>...</tr> result row in the listing table.
route_re = re.compile(r'''
             <tr>[\s]+<td>[\s\S]+?</td>[\s]+</tr>
             ''', re.X|re.S)

# Price cell followed by the booking link whose title attribute carries the
# route title.  Group 1 = price digits, group 2 = title text.
price_title_re = re.compile(r'''
             <td>(\d+)</td>\s+<td><a\shref="\S+"\s+title="([\s\S]+?)">预订</a></td>
             ''',re.X|re.S)

# Two adjacent cells: group 1 = departure city, group 2 = destination.
depcity_dest_re = re.compile(r'''
             <td>(\S+)</td>\s+<td>(\S+)</td>
             ''',re.X|re.S)

# Total page count shown in the pager ("pageunchecked" span on the last page link).
page_total_re = re.compile(r'''
             class="pageunchecked">(\d+)</span></td>
             ''', re.X|re.S)

# Detail-page blurb following the manager-recommendation heading.
detail_re = re.compile(r'''经理推荐</strong>([\s\S]+?)</td>[\s]+</tr>''',re.X|re.S)

# One itinerary day cell (class="Indent...") on the detail page.
schedule_re = re.compile(r'''<td\s\sclass="Indent[\s\S]*?"><h4([\s\S]+?)</td>''',re.X|re.S)

# Day description: everything after the 90px-high image tag.
brief_re = re.compile(r'''height="90"\s/>([\s\S]+)$''',re.X|re.S)

# Day ordinal: the N in "第N天" ("day N").  NOTE(review): compiled but not
# referenced in this file — possibly used elsewhere or dead; confirm.
num_re = re.compile(r'''第(\d+)天''',re.X|re.S)

# Place name between the </strong> and the closing </h4> of a day heading.
place_re = re.compile(r'''</strong>([\s\S]+?)</h4>''',re.X|re.S)

def do_crawl():
    try:
        for i in range(1,10):
            html = urllib2.urlopen(url_template % (i,1)).read()
            if page_total_re.search(html):
                page_total = string.atoi(page_total_re.search(html).group(1))
                for page_no in range(1,page_total):
                    html = getpage(page_no)
                    if html:
                        route_list = route_re.findall(html)
                        for route_html in route_list:
                            route = Route()
                            route.site = 53
                            route.supplier_url = 'http://www.actrip.com/'
                            route.supplier = "雅程旅行网"
                            route.go_t = route.back_t = "-"
                            route_html = route_html.decode("gbk").encode("utf-8")
                            price_pt=price_title_re.search(route_html)
                            if price_pt:
                                route.price = price_pt.group(1)
                                route.title = price_pt.group(2)
                            depcity_pt=depcity_dest_re.search(route_html)
                            if depcity_pt:
                                route.outcity = depcity_pt.group(1)
                                route.dest = depcity_pt.group(2)
                            route_url_pt=url_re.search(route_html)
                            if route_url_pt:
                                route_url = route_url_pt.group(1)
                            route.rout_url = 'http://www.actrip.com/' + route_url
                            page = urllib2.urlopen(route.rout_url).read().decode("gbk").encode("utf-8")
                            do_parse(page,route)
                            print >>sys.stderr, 'updating', route.supplier, route.title
                            route.updRoute()
                            route.updRoute_Schedules()
            else:
                pass
    except:
        print traceback.format_exc(sys.exc_info())

def do_parse(html, route):
    """Parse a route detail page into *route*.

    Fills route.detail (manager-recommendation blurb, when present),
    route.days (number of itinerary cells, minimum 1), route.dates,
    route.type (自由行 vs 常规线路 based on the title), and appends one
    Schedule per itinerary day to route.schedules.
    """
    # BUGFIX: original unconditionally did detail_re.search(html).group(1)
    # first — AttributeError whenever the detail section is absent — and
    # then repeated the same search behind a guard.  Do it once, guarded.
    detail_pt = detail_re.search(html)
    if detail_pt:
        route.detail = detail_pt.group(1)

    schedule_list = schedule_re.findall(html)
    # Routes with no itinerary cells still count as single-day trips.
    route.days = len(schedule_list) or 1
    route.dates = "天天发团"
    route.type = "自由行" if route.title.find('自由行') != -1 else "常规线路"

    for day_no, schedule_html in enumerate(schedule_list, 1):
        schedule = Schedule()
        schedule.s_num = str(day_no)
        brief_pt = brief_re.search(schedule_html)
        if brief_pt:
            schedule.s_brief = brief_pt.group(1)
        place_pt = place_re.search(schedule_html)
        # BUGFIX: original guarded this branch with brief_pt (copy-paste),
        # raising AttributeError when brief matched but place did not.
        if place_pt:
            place_temp = place_pt.group(1).split('-')
            # "A-B" headings keep the arrival city; otherwise the whole text.
            schedule.s_place = place_temp[1] if len(place_temp) == 2 else place_temp[0]
        route.schedules.append(schedule)

def getpage(pno):
    try:
        cookies = urllib2.HTTPCookieProcessor()
        opener = urllib2.build_opener(cookies)
        opener.open('http://www.actrip.com/packsearch.aspx')
        
        req = urllib2.Request(r'http://www.actrip.com/myajax.aspx',
                              'pack_search_p_city_b_mytxt=%u51FA%u53D1%u57CE%u5E02&pack_search_p_city_e_mytxt=%u76EE%u7684%u5730/%u5173%u952E%u5B57&ajax_type=htmlGoPage&paraname=PackLineSection&pageno='+str(pno))
        opener.open(req)
        req = urllib2.Request(r'http://www.actrip.com/packsearch.aspx?pageno='+str(pno))
        conn = opener.open(req)
        html = conn.read()
        opener.close()
        return html      
    except:
        print "except getpage " + str(pno)
        return None

if __name__ == '__main__':
    # Run from the script's own directory so any relative paths used by the
    # Route layer resolve consistently regardless of the caller's cwd.
    script_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(script_path)
    do_debug = False

    try:
        # NOTE(review): '-p:' takes an argument but the long form 'provider'
        # has no trailing '=' so '--provider' takes none — confirm intended.
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache', 'provider'])
    except getopt.GetoptError:
        # NOTE(review): usage() is not defined in this file; presumably it
        # comes from 'from Route import *' — verify, else this raises
        # NameError on bad options.
        usage()
        sys.exit()

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt in ('-d', '--debug'):
            do_debug = True
        elif opt in ('-n', '--no-cache'):
            # NOTE(review): use_cache and only_cache are assigned here but
            # never read anywhere in this file — dead flags or consumed
            # elsewhere; confirm.
            use_cache = False
        elif opt in ('-o', '--only-cache'):
            if arg.lower() in ('no', 'n', '0'):
                only_cache = False
            else:
                only_cache = True
        elif opt in ('-p', '--provider'):
            # Provider selection is accepted but currently ignored.
            pass

    if do_debug:
        # -d/--debug drops into pdb before the crawl starts.
        import pdb
        pdb.set_trace()
    do_crawl()
