#!/bin/python
# -*- coding: UTF-8 -*-

import os
import re
import sys
import time
import urllib2

from threading import Timer

def GetArg(name):
  """Return the value following command-line flag *name*, or None.

  A leading "-" is prepended when *name* lacks one.  None is returned when
  the flag is absent from sys.argv or is the last token (no value follows).
  """
  flag = name if name.startswith("-") else "-" + name
  try:
    pos = sys.argv.index(flag) + 1
  except ValueError:  # flag not present at all
    return None
  return sys.argv[pos] if pos < len(sys.argv) else None

class Link:
  """A hyperlink scraped from an HTML page: target URL plus anchor text."""

  def __init__(self, link_url, title):
    # Plain public attributes; read directly by the scraping helpers.
    self.link_url = link_url
    self.title = title

  def __str__(self):
    # Compact debug form, e.g.  l: 'http://x' t:'Title'
    return "l: '%s' t:'%s'" % (self.link_url, self.title)

def GetAllLinksFromHTMLText(html_text):
  """Extract one Link per line of *html_text* that looks like an anchor.

  Only the first href="..." on a line is considered; the anchor text is
  captured up to the first '<' or '&' after the closing '>'.
  """
  anchor_re = re.compile(r'.*href="(.*?)".*?>(.*?)[<&].*')
  links = []
  for raw_line in html_text.split("\n"):
    m = anchor_re.match(raw_line)
    if m is not None:
      links.append(Link(m.group(1), m.group(2)))
  return links

def GetAllImageUrls(html_text):
  """Return every <img src="..."> URL found in *html_text*, in order.

  A single line may carry several <img> tags; they are consumed left to
  right by re-matching on the unmatched remainder of the line.
  """
  img_re = re.compile(r'.*?<img src="(.*?)"(.*)')
  urls = []
  for chunk in html_text.split("\n"):
    m = img_re.match(chunk)
    while m:
      urls.append(m.group(1))
      chunk = m.group(2)  # keep scanning what follows this tag
      m = img_re.match(chunk)
  return urls

def GetDemotivatorLinks(page_url):
  """Fetch *page_url* and return only links whose title mentions demotivators.

  The marker "емотива" matches the Russian word stem regardless of the
  capitalized first letter.
  """
  all_links = GetAllLinksFromHTMLText(RelyableReadUrl(page_url))
  return [link for link in all_links if "емотива" in link.title]

def GetDemotivatorLinksForPages(page_numbers):
  """Collect demotivator links from every listing page in *page_numbers*."""
  collected = []
  for number in page_numbers:
    collected.extend(GetDemotivatorLinks("http://ziza.qip.ru/page/%d/" % number))
  return collected

def GetDemotivatorImageUrls(page_url):
  """Fetch *page_url* and return only image URLs under a /demotiv/ path."""
  page = RelyableReadUrl(page_url)
  return [url for url in GetAllImageUrls(page) if "/demotiv/" in url]

def BuildName2URL(imgurls):
  """Yield (filename, url) pairs for date-stamped image URLs.

  URLs shaped like .../YYRR/MM/.../stem.ext become "RR-YY-MM-stem.ext"
  (any remaining "/" replaced by "-").  Non-matching URLs are reported
  and skipped.
  """
  url_re = re.compile(r".*?(\d\d)(\d+)/(\d+)/.*?/(.+?)\.(.*)")
  for url in imgurls:
    m = url_re.match(url)
    if m is None:
      print("WARNING: '%s'" % url)
      continue
    century, year_rest, month, stem, ext = m.groups()
    fname = "%s-%s-%s-%s.%s" % (year_rest, century, month, stem, ext)
    yield fname.replace("/", "-"), url

#def GenerateNameBufferPairs(nameurl_pairs):
#  buffers = set()
#  for name, url in nameurl_pairs:
#    imgbuffer = urllib.urlopen(url).read()
#    if imgbuffer in buffers: continue
#    buffers.add(imgbuffer)
#    yield (name, imgbuffer)

def FetchNameUrlPairs(nameurl_pairs):
  """Download each (name, url) pair with wget, printing progress.

  Bug fix: the original read an uninitialized `counter` and an undefined
  `all_image_urls` (a local of the caller, not visible here), raising
  UnboundLocalError/NameError on the first iteration.  Progress is now
  tracked locally against the total number of pairs.
  """
  import pipes  # local import; py2 stdlib home of shell quoting
  pairs = list(nameurl_pairs)
  total = len(pairs)
  counter = 0
  for name, url in pairs:
    # Security: name/url come from scraped pages, so they are untrusted;
    # quote them instead of splicing raw into a shell command line.
    os.system("wget %s -O %s -t 10" % (pipes.quote(url), pipes.quote(name)))
    counter += 1
    print("'%s' %d from %d, %.1f%% completed." %
          (name, counter, total, 100.0 * counter / total))

def FetchDemotivators():
  pgbeg = GetArg("pgbeg")
  if pgbeg is None: pgbeg = 1
  pgend = GetArg("pgend")
  if pgend is None: pgend = 3
  pgbeg = int(pgbeg)
  pgend = int(pgend)
  print "Fetching demotivators for pages: %d .. %d to specify custom pages use: command line arguments: '-pgbeg X -pgend Y'" % (pgbeg, pgend)
  all_image_urls = []
  for link in GetDemotivatorLinksForPages(xrange(pgbeg, pgend)):
    all_image_urls.extend(GetDemotivatorImageUrls(link.link_url))
  print "Images to download:", len(all_image_urls)
  counter = 0
  nameurl_pairs = list(BuildName2URL(all_image_urls))
  nameurl_pairs.sort()
  FetchNameUrlPairs(nameurl_pairs)
#  for name, buffer in GenerateNameBufferPairs(nameurl_pairs):
#    f = open(name, "w")
#    f.write(buffer)
#    f.close()
#    counter += 1
#    print "'%s' %d from %d, %.1f%% completed." % (name, counter, len(all_image_urls), (100.0 * counter / len(all_image_urls)))

def RelyableReadUrl(link, timeout=5.0, attempts=10):
  """Read *link* with retries, using a watchdog Timer to abort hung reads.

  Progress characters go to stdout: "oN" per attempt, "-" on failure,
  "r" once connected, "+LEN" with the byte count on success.  Returns the
  page body, or "" after *attempts* failures.

  Fixes vs. original: bare `except:` narrowed to Exception (no longer
  swallows KeyboardInterrupt/SystemExit); the stream is closed on the
  success path (was leaked after the watchdog was cancelled); a newline
  is emitted after a fully failed fetch so the log stays line-oriented.
  """
  sys.stdout.write("rget: '%s' " % link)
  for i in xrange(attempts):
    sys.stdout.write("o%i" % i)
    sys.stdout.flush()
    try:
      stream = urllib2.urlopen(link, timeout=timeout)
      if stream is None:
        sys.stdout.write("-")
        sys.stdout.flush()
        continue
    except Exception:
      sys.stdout.write("-")
      sys.stdout.flush()
      continue
    sys.stdout.write("r")
    sys.stdout.flush()
    # Watchdog: urlopen's timeout covers the connect phase, not a stalled
    # read(); closing the stream from another thread aborts a hung read.
    timer = Timer(timeout, stream.close)
    timer.start()
    try:
      buf = stream.read()
      timer.cancel()
      stream.close()  # was leaked on success
      sys.stdout.write("+%i\n" % len(buf))
      return buf
    except Exception:
      timer.cancel()  # harmless if it already fired and closed the stream
      sys.stdout.write("-")
      sys.stdout.flush()
  sys.stdout.write("\n")  # terminate the progress line on total failure
  return ""

def FetchUniversal():
  """Scrape image URLs from a paginated gallery and wget each one.

  Command-line args: -pgbeg/-pgend page range (defaults 1 and 2; xrange
  makes the end exclusive despite the inclusive-looking banner) and
  -pgtmpl listing-page URL template (default ziza.qip.ru).
  """
  pgbeg = GetArg("pgbeg")
  if pgbeg is None: pgbeg = 1
  pgend = GetArg("pgend")
  if pgend is None: pgend = 2
  pgbeg = int(pgbeg)
  pgend = int(pgend)
  pgtmpl = GetArg("pgtmpl")
  if pgtmpl is None: pgtmpl = "http://ziza.qip.ru/page/"
  print ("Fetching images for pages: %d .. %d from: '%s' to specify" + \
         " custom pages use: command line arguments: '-pgbeg X " + \
         "-pgend Y -pgtmpl \"http://abc\"'") % (pgbeg, pgend, pgtmpl)
  # Phase 1: collect the unique set of link URLs across all listing pages.
  all_links = set()
  for page_number in xrange(pgbeg, pgend):
    page_url = pgtmpl + str(page_number)
    links = GetAllLinksFromHTMLText(RelyableReadUrl(page_url))
    link_urls = [l.link_url for l in links]
    all_links = all_links.union(set(link_urls))
  # Phase 2: keep only photo-article links shaped like ..._123_foto.html.
  image_links = []
  imagelink_matcher = re.compile(".*_\d+_foto.html")
  for link in all_links:
    if link.find("tellafriend") > -1: continue
    matched = imagelink_matcher.match(link)
    if matched is None: continue
    # The pattern has no trailing anchor, so require that it consumed the
    # entire URL before accepting the link.
    if matched.group(0) == link:
      image_links.append(link)
  # Phase 3: visit each article and harvest its image URLs, skipping
  # known tracker/counter hosts that are not content images.
  all_image_urls = set()
  for i in xrange(len(image_links)):
    link = image_links[i]
    html_text = RelyableReadUrl(link) # urllib.urlopen(link).read()
    for image_url in GetAllImageUrls(html_text):
      if image_url.find("avatars.qip.ru") > -1: continue
      if image_url.find("tns-counter.ru") > -1: continue
      if image_url.find("counter.rambler.ru") > -1: continue
      if image_url.find("mc.yandex.ru") > -1: continue
      if image_url.find("count.rbc.ru") > -1: continue
      all_image_urls.add(image_url)
    print "%s got image links for pages: %i from %i, image urls: %i" % \
        (time.strftime("%H:%M:%S", time.gmtime()),
         i+1, len(image_links), len(all_image_urls))
  # Phase 4: download every harvested image via wget, reporting progress.
  counter = 0
  for name, url in BuildName2URL(all_image_urls):
    #print "wget '%s' -O '%s' -t 10 -T 5 -w 1" % (url, name)
    os.system("wget '%s' -O '%s' -t 10" % (url, name))
    counter += 1
    print "'%s' %d from %d, %.1f%% completed." % (name, counter, len(all_image_urls), (100.0 * counter / len(all_image_urls)))

def GetRootPages(basePattern, beg, end):
  """Yield basePattern.format(n=n) for n from *beg* to *end*, inclusive.

  Walks downward when end < beg, so both directions are supported.
  """
  step = -1 if end < beg else 1
  n = beg
  while n != end + step:
    yield basePattern.format(n=n)
    n += step

def GetAllLeafs(rootPage):
  """Yield the href of every '<h2> <a href="...' heading link on *rootPage*."""
  marker = "<h2> <a href=\""
  remainder = RelyableReadUrl(rootPage)
  while True:
    start = remainder.find(marker)
    if start < 0:
      return  # no more heading links
    remainder = remainder[start + len(marker):]
    quote = remainder.find("\"")
    yield remainder[:quote]
    remainder = remainder[quote:]

def leaf2location(leaf):
  """Return the flash 'file=' media URL embedded in page *leaf*, or None.

  The URL is taken from the flashvars attribute, up to the next '&'
  (or to end-of-buffer when no '&' follows).
  """
  page = RelyableReadUrl(leaf)
  marker = "flashvars=\"file="
  start = page.find(marker)
  if start < 0:
    return None
  tail = page[start + len(marker):]
  amp = tail.find("&")
  return tail if amp < 0 else tail[:amp]

def ConstructName(rootPage, leaf, location):
  """Build a local file name "SERIA-NAME-CODE" from page/leaf/location URLs.

  seria = first digit run in *rootPage*; name = last path component of
  *leaf* before its extension; code = last path component of *location*.
  Each component degrades to "" when its pattern does not match; single
  quotes are stripped from the result (they break the wget shell command).
  """
  m = re.match(".*?(\\d+).*", rootPage)
  if m is not None and len(m.groups()) > 0:
    seria = m.groups()[0] + "-"
  else:
    seria = ""
  m = re.match(".*/(.*?)\\..*", leaf)
  if m is not None and len(m.groups()) > 0:
    name = m.groups()[0] + "-"
  else:
    name = ""
  m = re.match(".*/(.*)", location)
  if m is not None and len(m.groups()) > 0:
    code = m.groups()[0]
  else:
    # Bug fix: the original assigned name = "" here, leaving `code` bound
    # to None and crashing the concatenation below with TypeError whenever
    # *location* contains no "/".
    code = ""
  result = seria + name + code
  return result.replace("'", "")

def GetLocationNames():
  namesPool = set()
  for rootPage in GetRootPages(GetArg("b"), int(GetArg("f")), int(GetArg("t"))):
    print rootPage, ":::"
    for leaf in GetAllLeafs(rootPage):
      location = leaf2location(leaf)
      if location is None:
        print "*" * 20, "\n*\n* WARNING:\n* leaf failed:", leaf, "*\n*\n", "*" * 20
        continue
      name = ConstructName(rootPage, leaf, location)
      if name in namesPool:
        addid = "%.5i" % len(namesPool)
        name += addid
      namesPool.add(name)
      yield (location, name)

def FetchHDP():
  """Write executable "bunch" scripts of wget commands for HDP downloads.

  Command-line args: -bs items per bunch (required), -bb bunch file base
  name (default "hdp-").  Each bunch file is chmod 755 so it can be run
  directly.

  Bug fix: the trailing flush no longer runs unconditionally, which used
  to emit an empty bunch file whenever the item count was an exact
  multiple of the bunch size (or zero).
  """
  commands = []
  itemsPerBunch = int(GetArg("bs"))
  bunchBase = GetArg("bb")
  # Boxed int so the nested function can mutate it (pre-`nonlocal` idiom).
  bunchCounter = [0]
  if bunchBase is None or len(bunchBase) == 0:
    bunchBase = "hdp-"
  def makeBunch(commands, bunchBase, bunchCounter):
    # Flush `commands` into the next numbered script file and clear the list.
    outputFile = "%s%.5i" % (bunchBase, bunchCounter[0])
    bunchCounter[0] += 1
    output = open(outputFile, "w")
    try:
      for c in commands:
        output.write(c)
        output.write("\n")
    finally:
      output.close()
    os.system("chmod 755 " + outputFile)
    del commands[:]
    print("bunch '%s' made." % outputFile)
  for (location, name) in GetLocationNames():
    command = "wget '%s' -O '%s' -t 10" % (location, name)
    commands.append(command)
    if len(commands) >= itemsPerBunch:
      makeBunch(commands, bunchBase, bunchCounter)
  if commands:  # flush the final partial bunch only when non-empty
    makeBunch(commands, bunchBase, bunchCounter)

def main():
  """Dispatch to the fetcher named by the -task flag (default "demos")."""
  executors = {
      "demos": FetchDemotivators,
      "uni": FetchUniversal,
      "hdp": FetchHDP,
  }
  task = GetArg("task")
  if task is None:
    task = "demos"
  # Unknown task names raise KeyError, matching the original behavior.
  executors[task]()

# Script entry point: run the -task dispatcher only when executed directly.
if __name__ == "__main__":
  main()
