import urllib2
from utils import rip_table_from_string
from utils import dive_into_soup
from BeautifulSoup import BeautifulSoup as Soup

# Base URL of the KeSPA pro-gamer site; all other endpoints are built from it.
kespa_url = 'http://www.progamer.or.kr'
# Daily schedule page; %s is a date formatted as YYYYMMDD (see get_match_tables).
schedule_url = kespa_url + '/schedule/daily01_sche.kea?m_code=sche_12&gDate=%s&gDvs=T&miniCal='
# Map detail page; %d is the numeric map id (see get_map_table).
map_url = kespa_url + '/board/map_view.kea?m_code=board_32&mapC=map%d'
# Player profile page; %d is the numeric player code (see get_player_table).
player_url = kespa_url + '/teams/player1.kea?m_code=team_24&pGame=1&pCode=%d'

def get_match_tables(date):
    """Return the parsed schedule-table soup for the given date.

    date: a date/datetime object; only its strftime('%Y%m%d') form is used.
    """
    # Path of child-content indices leading to the schedule tables.
    table_path = [0, 1, 4, 1, 1, 3, 0, 5, 3, 5]
    return get_magic_section(schedule_url % date.strftime('%Y%m%d'), table_path)

def get_map_table(map_id):
    """Return the parsed map-table soup for the map with numeric id *map_id*."""
    # Path of child-content indices leading to the map table.
    table_path = [0, 1, 4, 1, 1, 3, 0, 5, 3, 5, 1, 1, 0]
    return get_magic_section(map_url % map_id, table_path)

def get_player_table(player_code):
    """Return the parsed player-table soup for the player with *player_code*."""
    # Path of child-content indices leading to the player table.
    table_path = [0, 1, 4, 1, 1, 3, 0, 5, 3, 7]
    return get_magic_section(player_url % player_code, table_path)

def get_magic_section(url, path=None, encoding='euc-kr'):
    """
    url: The url to retrieve whose html the "magic section" should be found.
    path: Path of contents to traverse into the magic section (defaults to
        an empty path, i.e. no traversal).
    encoding: Encoding of the page.

    Description:

    The Kespa site has pages that, when loaded in their entirety into
    BeautifulSoup, makes the Soup taste bad (it doesn't parse everything). In
    order to alleviate this, only a section of the site is taken. A good way to
    do this is to search for these strange "map" tags...
    <map name="Map2">
    (A lot of HTML, including the stuff we want...)
    <map name="MapMap">

    By getting the text between these two maps and putting it into a Soup, we
    come up with a correctly parsed Soup.
    """
    # Avoid a mutable default argument; None means "no traversal path".
    if path is None:
        path = []

    page = _get_page(url)

    start = page.find('<map name="Map2">')
    # Don't start with the map tag, start with the next table tag we find.
    start = page.find('<table', start)
    end = page.find('<map name="MapMap">')

    # Slice out the well-formed section, then decode it; 'replace' keeps a
    # stray badly-encoded byte from aborting the whole parse.
    page = page[start:end]
    page = page.decode(encoding, 'replace')

    s = Soup(page)
    s = dive_into_soup(s, path)

    # XXX: Soups for some reason are different after this. We'll go ahead
    # and do this now so that we can easily work with the same type of soup
    # when testing.
    s = Soup(unicode(s).encode('utf-8'))

    return s

def _get_page(url):
    """Fetch *url* and return its body as one long byte string with all
    CR/LF characters stripped (so substring searches span "lines").

    The response is always closed, even if reading raises.
    """
    result = urllib2.urlopen(url)
    try:
        # read() grabs the whole body at once; joining readlines() was an
        # indirect way of doing the same thing.
        body = result.read()
    finally:
        # urlopen results are not context managers in Python 2; close
        # explicitly so the socket is not leaked.
        result.close()
    return body.replace('\r', '').replace('\n', '')

