#!/usr/bin/env python
########################################################################
#  Quote - On-line historical quotes
#  Copyright (C) 2007,8 Ofer Barkai
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
########################################################################
"""Retrieve historical quotes from Tel-Aviv Stock Exchange."""
import urllib
import urllib2
import re
import quote
from lib.secid import validIsin

def get(isin, d1, d2):
    """Fetch daily historical quotes for a TASE security.

    isin -- security identifier in one of three forms: a full ISIN
            ("IL0" prefix plus check digit), a numeric TASE share ID of
            up to 8 digits, or such an ID with a trailing ".0" (as
            produced by spreadsheet exports)
    d1, d2 -- date-range bounds as YYYYMMDD strings
    Returns a dict mapping YYYYMMDD -> quote fields ('open', 'high',
    'low', 'close', 'volume', 'adjusted'), values as scraped strings.
    Raises Exception if the identifier matches none of the forms above.
    """
    # Normalize the identifier to the 8-digit share ID the site expects.
    if isin.startswith('IL0') and validIsin(isin):
        # Full ISIN: drop the "IL0" prefix and the trailing check digit.
        isin = isin[3:-1]
    elif len(isin) <= 8 and isin.isdigit():
        isin = '0'*(8-len(isin)) + isin
    elif isin.endswith('.0') and len(isin) <= 10 and isin[:-2].isdigit():
        # "NNN.0" form: strip the ".0" and zero-pad to 8 digits
        # (10 = 8 digits + the 2 characters of ".0").
        isin = '0'*(10-len(isin)) + isin[:-2]
    else:
        raise Exception('Illegal TASE/ISIN ID')
    d1 = quote.beforeFirst(d1) # make sure we have a previous "base" quote
    quote.info("tase.get(%s, %s, %s)", isin, d1, d2)
    url = 'http://www.tase.co.il' + \
          '/TASEEng/General/Company/companyHistoryData.htm?shareID='+isin
    # First request: collect the session cookie, the form's real target
    # URL (from the <base> tag) and the ASP.NET __VIEWSTATE token.
    # (Renamed from "file"/"open" to avoid shadowing builtins.)
    response = urllib2.urlopen(url)
    cookie = response.info().getheader('set-cookie')
    page = response.read()
    url = re.search(r'<base\s.*href="([^"]+)"', page).group(1)
    viewstate = re.search(r'name="__VIEWSTATE"\s+value="([^"]+)"',
                          page).group(1)
    def convert(d):
        # YYYYMMDD -> DD/MM/YYYY, the format the form's calendars expect.
        return '/'.join((d[6:8], d[4:6], d[0:4]))
    data = (
        ("__VIEWSTATE", viewstate),
        ("HistoryData1:hiddenID", "0"), # ?
        ("HistoryData1:rbPeriod", "rbPeriod8"), # From
        ("HistoryData1:dailyFromCalendar:cal_date", convert(d1)),
        ("HistoryData1:dailyToCalendar:cal_date", convert(d2)),
        ("HistoryData1:rbFrequency", "rbFrequency1"), # Daily
        ("HistoryData1:RBCoordinatedList", "AdjustmentRate"), # Adjusted to ...
        ("HistoryData1:CBDailyDFiledsList:0", "on"), # Closing Price ...
        ("HistoryData1:CBDailyDFiledsList:1", "on"), # Opening  Price
        ("HistoryData1:CBDailyDFiledsList:8", "on"), # Volume
        ("HistoryData1:CBDailyDFiledsList:3", "on"), # High
        ("HistoryData1:CBDailyDFiledsList:4", "on"), # Low
    )
    # Second request: POST the form with the session cookie and scrape
    # the resulting data grid, one table row per HTML line.
    req = urllib2.Request(url, urllib.urlencode(data), {'Cookie': cookie})
    response = urllib2.urlopen(req)
    # Compile the row pattern once, outside the per-line loop.
    row_re = re.compile(r'^.*>(\d\d)/(\d\d)/(\d\d\d\d)<' +
                        r'.*>([\d,.]+)<.*>([\d,.]+)<.*>([\d.%-]+)<' +
                        r'.*>([\d,.]+)<.*>([\d,.]+)<.*>([\d,.]+)<' +
                        r'.*>([\d,]+)<.*$')
    result = {}
    for line in response.read().splitlines():
        # NOTE(review): "> 0" skips a match at column 0; presumably the
        # marker never starts a line in the served HTML -- confirm.
        if line.find('DataGridItem') > 0:
            (day, month, year,
             adjusted, close, change,
             open_, high, low,            # open_: avoid shadowing open()
             volume) = row_re.match(line).groups()
            result[year+month+day] = {'open':open_, 'high':high, 'low':low,
                                      'close':close, 'volume':volume,
                                      'adjusted':adjusted}
    quote.adjusted2coef(result)
    return result

