#!/usr/bin/env python
# coding=utf-8

import os, sys, re, traceback, logging
import urllib
import time
import datetime
import getopt
import math
import urllib2
from Route import *

# Current page index shown in the listing page's <span id="_pageIndex">.
pageNo_re = re.compile(r'''
        <span\sid="_pageIndex">[\s]*?(\d+)</span>
        ''', re.X|re.S)

# Total number of routes, kept in a hidden <span id="_AllCount"> element;
# used to derive the number of listing pages (10 routes per page).
routeTotal_re = re.compile(r'''
        <span\sid="_AllCount"\sstyle="display:\snone;">\s+(\d+)?</span>
        ''', re.X|re.S)

# One HTML fragment per route summary on a listing page
# (the <div class='res_info freeS'> card).
routeDiv_re = re.compile(r'''
        <div\sclass='res_info\sfreeS'>([\s\S]+?)</div>\s+</div>
        ''', re.X|re.S)

# Relative URL of a route's detail page, taken from the
# "查看详情" ("view details") link inside a route card.
routeUrl_re = re.compile(r'''
        href='(Package\.aspx\?id=\d+)'><span>查看详情</span>
        ''', re.X|re.S)

# One fragment per day of the itinerary on a detail page
# ("第N天" = "day N"), up to the closing </li>.
schedule_re =  re.compile(r'''
                     第\d+天[\s\S]+?</li>
        ''', re.X|re.S)

# (label, listing-URL template) pairs to crawl; '%d' is the page number.
# '自由行' = independent/free travel packages.
server_urls = [('自由行','http://package.byecity.com/search.aspx?key=&type=&start=&area=&sort=&ishot=&isdefault=&page=%d&price=&days=&month=')]

def do_crawl():
    try:
        for vacation_type, server_url in server_urls:
            html = urllib2.urlopen(server_url % 1).read()
            #获取总线路数算出总页数
            pageTotal = math.ceil(string.atoi(routeTotal_re.search(html).group(1)) / 10.0)
            #根据总页数，遍历所有线路列表页
            for pageNo in range(1,pageTotal + 1):
                #获取每页信息
                html = urllib2.urlopen(server_url % pageNo).read()
                #获取所有描述线路的HTML片段 
                route_list = routeDiv_re.findall(html)
                #遍历，解析
                for div in route_list:
                    route = Route()
                    route.site = 28
                    route.supplier_url = 'http://www.byecity.com/'
                    route.supplier = "佰程旅行网"
                    route.go_t = route.back_t = "-"
                    route.title = re.compile(r'''class='c3'>([\s\S]+?)</a></h3>''', re.X|re.S).search(div).group(1)
                    route.price = re.compile(r'''￥(\d+)起''', re.X|re.S).search(html).group(1)
                    route.type = re.compile(r'''产品主题：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    route.outcity = re.compile(r'''出发口岸：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    route.days = re.compile(r'''行程天数：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    route.dest = re.compile(r'''途径城市：<span>([^</span>.*]+?)</span>''', re.X|re.S).search(html).group(1)
                    date_result = re.compile(r'''出发日期：<span>(\S+?)到(\S+?)期间<span>''', re.X|re.S).search(html)
                    route.dates = date_result.group(1).strip()+";" if date_result.group(1).strip() == date_result.group(1).strip() else date_result.group(1).strip() + ";" +date_result.group(2).strip() + ";"
                    route_url = routeUrl_re.search(div).group(1)
                    route.rout_url = "http://package.byecity.com/" + route_url
                    #根据URL获取线路详细页面的HTML
                    temp = urllib2.urlopen(route.rout_url).read()
                    #进一步解析 
                    do_parse(temp,route)
                    print >>sys.stderr, 'updating', route.supplier, route.title
                    route.updRoute()
                    route.updRoute_Schedules()
                
    except:
        print traceback.format_exc(sys.exc_info())

#解析每一天的行程安排
# Parse the per-day itinerary out of a route detail page.
def do_parse(html,route):
    """Extract the cover image and the day-by-day schedule from a route
    detail page's HTML and attach them to *route*.

    Each itinerary fragment becomes a Schedule appended to
    route.schedules; meal flags are '0' when the meal is self-paid
    ("自理") and '1' when it is included.
    """
    cover_match = re.compile(r'''<ul><li\sclass='on'><img\ssrc='(\S+?)'\s''', re.X|re.S).search(html)
    if cover_match:
        route.img_url = cover_match.group(1)

    # Field patterns for a single day's fragment, compiled once.
    day_num_re = re.compile(r'''第(\d+)天''', re.X|re.S)
    place_re = re.compile(r'''第\d+天&nbsp;(\w+)&nbsp;''', re.X|re.S)
    brief_re = re.compile(r'''<span\sstyle="font-family:\s宋体">([\s\S]+?)</span>''', re.X|re.S)
    breakfast_re = re.compile(r'''早\s+餐：(\S+?)&nbsp;''', re.X|re.S)
    lunch_re = re.compile(r'''午\s+餐：(\S+?)&nbsp;''', re.X|re.S)
    dinner_re = re.compile(r'''晚\s+餐：(\S+?)</li>''', re.X|re.S)

    for fragment in schedule_re.findall(html):
        day = Schedule()
        # Day number ("第N天" = "day N").
        day.s_num = day_num_re.search(fragment).group(1)
        place_match = place_re.search(fragment)
        if place_match:
            day.s_place = place_match.group(1)
        brief_match = brief_re.search(fragment)
        if brief_match:
            day.s_brief = brief_match.group(1)
        # '自理' means the traveller pays for the meal themselves.
        day.s_brf = '0' if breakfast_re.search(fragment).group(1).strip() == '自理' else '1'
        day.s_lunch = '0' if lunch_re.search(fragment).group(1).strip() == '自理' else '1'
        day.s_dinner = '0' if dinner_re.search(fragment).group(1).strip() == '自理' else '1'
        route.schedules.append(day)
    return

if __name__ == '__main__':
    # Run from the script's own directory so relative paths resolve
    # regardless of the caller's cwd.
    script_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(script_path)
    do_debug = False

    try:
        # BUG FIX: '-o' and '-p' take arguments (o:, p:), so their long
        # forms must be declared with a trailing '=' — otherwise
        # '--only-cache=x' / '--provider=x' raise GetoptError.
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'hdno:p:', ['help', 'debug', 'no-cache', 'only-cache=', 'provider='])
    except getopt.GetoptError:
        # NOTE(review): usage() is not defined in this file — presumably
        # expected from 'from Route import *'; verify, or this path
        # raises NameError instead of printing help.
        usage()
        sys.exit()

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt in ('-d', '--debug'):
            do_debug = True
        elif opt in ('-n', '--no-cache'):
            use_cache = False
        elif opt in ('-o', '--only-cache'):
            # Any value other than no/n/0 enables only-cache mode.
            only_cache = arg.lower() not in ('no', 'n', '0')
        elif opt in ('-p', '--provider'):
            # Provider filtering not implemented yet.
            pass

    if do_debug:
        # Drop into the debugger before crawling when -d/--debug is set.
        import pdb
        pdb.set_trace()
    do_crawl()
