#!/usr/bin/python
import urllib2
import os
import re

# Map English month names to zero-padded two-digit month strings
# ('January' -> '01', ..., 'December' -> '12'), used when rewriting
# scraped "Month Day, Year" dates into gocomics-style URL paths.
month2num = dict(
        (name, '%02d' % (pos + 1))
        for pos, name in enumerate((
            'January', 'February', 'March', 'April',
            'May', 'June', 'July', 'August',
            'September', 'October', 'November', 'December')))

def official_download(version):
    """Download color low-resolution strips for *version* from peanuts.com.

    Looks up the search keyword for *version* in the module-level
    ``mapping`` dict, scrapes the search-results page for strip image
    URLs, and saves each image as "<version>-o-<n>.jpg" in the current
    directory.

    Returns the number of images downloaded (0 when nothing matched).
    """
    prefix = "http://www.peanuts.com/search/?pubdate=&sort_by=bydate&seasonal=&startdate=&enddate=&selectcharacter=&type=comic_strips&keyword="
    keyword = mapping[version]
    url = prefix + keyword.replace(" ", "+")
    content = urllib2.urlopen(url).read()
    # Dots are escaped so they match literal dots, and ".*?" is
    # non-greedy so one match cannot swallow a neighbouring <img> tag
    # that happens to share the same line.
    matched = re.findall(r'<img src="(http://www\.peanuts\.com/wp-content/comic-strip/color-low-resolution/.*?\.jpg)">', content)
    count = 0
    for entry in matched:
        # "wb": JPEG data is binary — text mode would corrupt it on
        # Windows; the with-block also closes the handle on error.
        with open(version + "-o-" + str(count) + ".jpg", "wb") as fout:
            fout.write(urllib2.urlopen(entry).read())
        count += 1
    return count

def unofficial_download(version, date, count):
    """Download one strip for *date* from gocomics.com (fallback source).

    version -- codename used to build the output filename
    date    -- publication date in "YYYY/MM/DD" form, appended to the URL
    count   -- running index used in the output filename

    Saves the first matching image as "<version>-u-<count>.jpg" in the
    current directory.  Raises ValueError when no image URL is found.
    """
    url = "http://www.gocomics.com/peanuts/" + date
    # Browser-like headers: presumably the site blocks or alters its
    # response for the default urllib2 user agent — TODO confirm.
    request_headers = {
            "Accept-Language": "en-US,en;q=0.5",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Referer": "http://thewebsite.com",
            "Connection": "keep-alive"
            }

    request = urllib2.Request(url, headers=request_headers)
    content = urllib2.urlopen(request).read()
    # Leading greedy ".*" deliberately kept: it makes the group capture
    # the last asset URL on a line, matching the original behaviour.
    # Dots in the host are escaped so they only match literal dots.
    matched = re.findall(r'.*(http://assets\.amuniversal\.com/.*?)\"', content)
    # Explicit check instead of assert — asserts are stripped under -O.
    if not matched:
        raise ValueError("no strip image found at " + url)
    # "wb": JPEG data is binary — text mode would corrupt it on
    # Windows; the with-block also closes the handle on error.
    with open(version + "-u-" + str(count) + ".jpg", "wb") as fout:
        fout.write(urllib2.urlopen(matched[0]).read())

def getDate(version):
    """Return publication dates for *version* as "YYYY/MM/DD" strings.

    Searches amureprints.com for the keyword mapped to *version* in the
    module-level ``mapping`` dict and parses "Month Day, Year" entries
    out of the result page.  Returns an empty list when the search
    yields no matches.  Raises ValueError on an unparseable date entry.
    """
    keyword = mapping[version]
    prefix = "http://amureprints.com/reprints/results?feature_codes[]=pe&release_date_from=&release_date_to=&commit=Search&terms="
    url = prefix + keyword.replace(" ", "+")
    content = urllib2.urlopen(url).read()
    # A date sits one line below a "Peanuts" heading, after a tab,
    # formatted like "January 1, 1995".
    matched = re.findall(r'Peanuts.*\n.*\t(.*, \d\d\d\d).*', content)
    ret = []
    # BUG FIX: the original looped over range(1, len(matched)) and read
    # matched[idx - 1], silently dropping the LAST match (and returning
    # nothing at all when there was exactly one match).
    for entry in matched:
        splits = entry.replace(",", "").split(" ")
        if len(splits) != 3:
            # Explicit error instead of assert (stripped under -O).
            raise ValueError("unexpected date format: " + entry)
        # Reorder "Month Day Year" -> "Year/MM/Day" for gocomics URLs.
        ret.append("%s/%s/%s" % (splits[2], month2num[splits[0]], splits[1]))
    return ret


# Build the version -> search-keyword table from the "codenames" file;
# each non-empty line has the form "version:nickname".
mapping = {}
with open("codenames", "r") as fin:
    for line in fin:
        line = line.rstrip()
        if not line:
            continue  # tolerate blank lines instead of crashing
        # maxsplit=1 so nicknames that contain ":" are kept intact
        version, nickname = line.split(":", 1)
        mapping[version] = nickname

# For each version: try the official site first, then fall back to
# downloading every dated strip discovered via amureprints/gocomics.
for version, keyword in mapping.items():
    print("processing version " + version + ": " + keyword)
    if not os.path.isdir(version):
        os.mkdir(version)
    # The download helpers write into the current directory, so work
    # inside a per-version subdirectory.
    os.chdir(version)
    try:
        if official_download(version) == 0:
            print("not found in official site")
        for count, date in enumerate(getDate(version)):
            unofficial_download(version, date, count)
    finally:
        # Always return to the parent directory, even when a download
        # raises, so later version directories land in the right place.
        os.chdir("..")
