#!/usr/bin/env python
# coding=utf-8
# 上航假期 — Shanghai Airlines Holiday (sh-holiday.com) vacation-route crawler
import os, sys, re, traceback, logging
import urllib,httplib
import time
import datetime
import getopt
import urllib2
from urllib2 import URLError
from Route  import *


mainBlock_re = re.compile(r'''
        您输入的查询条件为.*?(<table\b.*</table>).*?快速搜索
        ''', re.X|re.S)

vacationBlock_re = re.compile(r'''
       <table\b(.*?)</table>
        ''', re.X|re.S)

vacation_info_re_pre = re.compile(r'''
        <Dujia_Xianlu>(.*?)<\s*/\s*Dujia_Xianlu>
        ''', re.X|re.S)
xml_cdata_node_re = re.compile(r'''
        <\s*(\w+)\s*>\s*<!\[CDATA\[\s*(.*?)\s*]]>\s*<\s*/\s*\1\s*>
        ''', re.X|re.S)

def do_crawl():
    try:
        #报团
        server_urls = [('跟团旅游','http://www.sh-holiday.com/sales/list_search_result.asp?ItineraryType=0&FromCity=SHA&FromCity_Keyword=%C9%CF%BA%A3&DestCity=&DestCity_Keyword=&Date1=&Date2=&Keyword=&x=33&y=10')]
        for vacation_type, server_url in server_urls:
            
            html = urllib2.urlopen(server_url).read()
            html = html.decode("gbk").encode('utf-8')
            print html
        
            #获取有用的块
            mainblock = mainBlock_re.findall(html)[0]
        
            #获取所有描述线路的HTML片段 
            vacationBlockList = vacationBlock_re.findall(mainblock)
            print len(vacationBlockList)
        
            for div in vacationBlockList:
                route = Route()
                #站点ID
                route.site = 50
                #提供商名称
                route.supplier = '上航假期'
                #供应商url
                route.supplier_url = 'http://www.sh-holiday.com/'
                #价格
                route.price = re.compile(r'''<span\s*class=t_tx_09>(\d+)起?''', re.X|re.S).search(div).group(1)
                #线路标题
                route.title = re.compile(r'''<a[^<>]*?>(.*?)</a>''', re.X|re.S).search(div).group(1)
                #线路url
                route.rout_url = 'http://www.sh-holiday.com/sales/' + re.compile(r'''href=([^<>]+)''', re.X|re.S).search(div).group(1)
                #行程类型
                route.type =vacation_type
                #出发城市
                route.outcity = re.compile(r'''</a></td><td\b[^<>]*>(.*?)</td>''', re.X|re.S).search(div).group(1)
                #到达城市
                route.dest = re.compile(r'''</a></td><td\b[^<>]*>.*?</td><td\b[^<>]*>(.*?)</td>''', re.X|re.S).search(div).group(1)
                #行程天数
                route.days = re.compile(r'''</a></td>(?:<td\b[^<>]*>.*?</td>){2}<td\b[^<>]*>(.*?)</td>''', re.X|re.S).search(div).group(1)
                #出发交通工具
                route.go_t = re.compile(r'''</a></td>(?:<td\b[^<>]*>.*?</td>){3}<td\b[^<>]*>(.*?)</td>''', re.X|re.S).search(div).group(1)
                #回程交通工具
                route.back_t = route.go_t     
           
                date_str = re.compile(r'''出发日期</td><td[^<>]*>&nbsp;(.*?)</td>''', re.X|re.S).search(html).group(1)
                route.dates = getDates(date_str)
                print >>sys.stderr, 'updating', route.supplier, route.title
                #进一步解析 
                do_parse(route.rout_url,route)
                route.updRoute()
                route.updRoute_Schedules()
            

    except:
        print traceback.format_exc(sys.exc_info())
        

#解析每一天的行程安排
def do_parse(url,route): 
    keyId = re.compile(r'''=(\w+)''', re.X|re.S).search(url).group(1)
    tempUrl = 'http://www.sh-holiday.com/ballet/inner_itinerarydate.asp?ItineraryCode='+keyId
    tempHtml = urllib2.urlopen(tempUrl).read().decode("gbk").encode('utf-8')
    urlSuffix = re.compile(r"""src='([^']+)'""", re.X|re.S).search(tempHtml).group(1)
    theUrl = 'http://www.sh-holiday.com/sales/'+urlSuffix
    html = urllib2.urlopen(theUrl).read()
    html = html.decode("gbk").encode('utf-8')
    schedule_re =  re.compile(r'''<BR><BR>(.*?)<BR><BR>''', re.X|re.S)
    schedule_list = schedule_re.findall(html)
    for parthtml in schedule_list:
        schedule = Schedule()
        uparthtml= parthtml.strip().decode("utf-8")
        
        #第几天
        schedule.s_num = re.compile(r'''D(\d{1,2})''', re.X|re.S).search(parthtml).group(1)
        
        #游览地点
        dest_temp = re.compile(r'''D\d+:\s*<BR>(.*?)<BR>.*?<BR>''', re.X|re.S).search(parthtml)
        if dest_temp:
            xx = dest_temp.group(1)
            dest_temp2 = re.compile(u'''[-—]+([^-—(（]+)[^-—]*$''', re.X|re.S).search(xx.decode("utf-8"))
            if dest_temp2:
                yy = dest_temp2.group(1)
                schedule.s_place = yy.encode('utf-8')
                
        #详细描述
        brief_temp = re.compile(r'''<BR>.*?<BR>(.*?)<BR>''', re.X|re.S).search(parthtml)
        if brief_temp:
            schedule.s_brief = filter_tags(brief_temp.group(1))
            print schedule.s_brief
            
        #早餐： 1有、0没有 
        schedule.s_brf = '1' if re.compile(u'''早\s*餐：(.{1})''', re.X|re.S).search(uparthtml).group(1).strip() == '有' else '0'
        schedule.s_lunch = '1' if re.compile(u'''午\s*餐：(.{1})''', re.X|re.S).search(uparthtml).group(1).strip() == '有' else '0'
        schedule.s_dinner = '1' if re.compile(u'''晚\s*餐：(.{1})''', re.X|re.S).search(uparthtml).group(1).strip() == '有' else '0'
        
        #酒店描述：可选
        
        _hotel_temp = re.compile(u'''宿：\s*(.+)''', re.X|re.S).search(uparthtml)
        if _hotel_temp:
            schedule._hotel = filter_tags(_hotel_temp.group(1).encode('utf-8'))
        route.schedules.append(schedule)
    return 

'''
    功能：日期转化
    输入为：8/7 8/14 8/21 1/28
    输出为：2010-08-07;2010-08-14;2010-08-21;2011-01-28
''' 
def getDates(str):
    li = [];
    now = datetime.datetime.now()
    dateList = str.split(" ")
    for dateStr in dateList:
        mon,day = int(dateStr.split('/')[0]),int(dateStr.split('/')[1])
        theDay = datetime.datetime(now.year,mon,day)
        if theDay < now:
            theDay = datetime.datetime(now.year+1,mon,day)
        ss = theDay.strftime('%Y-%m-%d')
        li.append(ss)
        
    return ";".join(li)
      



def usage():
    print '''Usage: %s [OPTIONS]...
Crawl hotel informations.

  -d,  --debug               enable pdb debugger

  -h,  --help                display this help and exit
''' % sys.argv[0]


##过滤HTML中的标签
#将HTML中标签等信息去掉
#@param htmlstr HTML字符串.
def filter_tags(htmlstr):
    #先过滤CDATA
    re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>',re.I) #匹配CDATA
    re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>',re.I)#Script
    re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>',re.I)#style
    re_br=re.compile('<br\s*?/?>')#处理换行
    re_h=re.compile('</?\w+[^>]*>')#HTML标签
    re_comment=re.compile('<!--[^>]*-->')#HTML注释
    s=re_cdata.sub('',htmlstr)#去掉CDATA
    s=re_script.sub('',s) #去掉SCRIPT
    s=re_style.sub('',s)#去掉style
    s=re_br.sub('\n',s)#将br转换为换行
    s=re_h.sub('',s) #去掉HTML 标签
    s=re_comment.sub('',s)#去掉HTML注释
    #去掉多余的空行
    blank_line=re.compile('\n+')
    s=blank_line.sub('\n',s)
    s=replaceCharEntity(s)#替换实体
    return s

##替换常用HTML字符实体.
#使用正常的字符替换HTML中特殊的字符实体.
#你可以添加新的实体字符到CHAR_ENTITIES中,处理更多HTML字符实体.
#@param htmlstr HTML字符串.
def replaceCharEntity(htmlstr):
    CHAR_ENTITIES={'nbsp':' ','160':' ',
                'lt':'<','60':'<',
                'gt':'>','62':'>',
                'amp':'&','38':'&',
                'quot':'"','34':'"',}
    
    re_charEntity=re.compile(r'&#?(?P<name>\w+);')
    sz=re_charEntity.search(htmlstr)
    while sz:
        entity=sz.group()#entity全称，如&gt;
        key=sz.group('name')#去除&;后entity,如&gt;为gt
        try:
            htmlstr=re_charEntity.sub(CHAR_ENTITIES[key],htmlstr,1)
            sz=re_charEntity.search(htmlstr)
        except KeyError:
            #以空串代替
            htmlstr=re_charEntity.sub('',htmlstr,1)
            sz=re_charEntity.search(htmlstr)
    return htmlstr

def repalce(s,re_exp,repl_string):
    return re_exp.sub(repl_string,s)

if __name__ == '__main__':
    do_crawl()



