"""
Get URL and resources API
    Larva:  base class
    Spider: threaded Larva
    Ant:    threaded massive link downloader
"""

# FIXMEs
# FIXME 1
# dots in URL
# http://www.ppmsg.com/siwameitui/201307/../201307/20891.html

# TODOs
# TODO 1
# A lambda predicate could be used for more precise pattern matching.



import hashlib
import math
import md5
import os
import os.path as op
import random
import re
import sys
import thread
import time
import urllib

from types import *
from exceptions import *

class CGetLnk:
  """Extract a resource URL from a line of text and download it to disk."""

  def __init__(self,file_types=None,retry=3):
    """ file_types: list of file extensions to look for,
                    defaults to ['jpeg','jpg','bmp']
        retry:      number of download attempts per URL
    """
    # None sentinel avoids the shared-mutable-default-argument trap
    if file_types is None:
      file_types = ['jpeg','jpg','bmp']
    if file_types and isinstance(file_types,list):
      self.lFileTypes = file_types
    else:
      print('file type should be a list!')
      # fall back to the default so the instance stays usable
      self.lFileTypes = ['jpeg','jpg','bmp']
    self.RETRY = retry

  def get_lnk(self,strin):
    """Return the first http URL in strin ending in a known extension,
       or '' when none is found."""
    sParse = strin.lower().strip()
    for x in self.lFileTypes:
      # \S+? (non-greedy, no whitespace) so two URLs on one line are not
      # merged into a single bogus match, as the old greedy .* did
      lTmp = re.findall(r'http://\S+?\.%s'%x,sParse)
      if lTmp:
        return lTmp[0]
    return ''

  def get_url(self,urlin):
    """Download urlin into the current directory.
       Returns 1 on any failure, None on success."""
    if urlin=='':
      #omit null string
      return 1
    sFileName = urlin[urlin.rfind(r'/')+1:].replace('?','.')
    if os.access(sFileName,os.F_OK):
      # name clash on disk: append a random tag, keep the extension
      sFileName = '%s_%04x.%s'%(sFileName,random.randint(1,65535),urlin[urlin.rfind(r'.')+1:])
    nRetry = self.RETRY
    while nRetry:
      try:
        fUrl = urllib.urlopen(urlin)
        break
      except IOError:  # urlopen signals connection problems via IOError
        print('Connection error!: %s'%urlin)
        nRetry -= 1
    else:
      # all retries exhausted
      print('cant get: %s'%urlin)
      return 1
    sTmp = fUrl.read(256)
    if 'html' in sTmp.lower():
      #block a page not found
      print('%s is not valid!'%urlin)
      fUrl.close()
      return 1
    fSav = open(sFileName,'wb')
    while sTmp!='':
      fSav.write(sTmp)
      sTmp = fUrl.read(4096)
    fSav.close()  # was leaked before
    fUrl.close()
    print('done!: %s as %s'%(urlin,sFileName))


class Larva:
  'Net Spider Base class, searching for available resources in one website'

  def __init__(self,url,depth=3,retry=3,local=False):
    """ url:      WebSite base URL
        depth:    search depth
        retry:    retry times
        local:    True -> keep only same-site (relative) links
    """
    self.depth = depth
    self.RETRY = retry
    self.local = local
    # resource-type codes understood by spider()
    self.RES_EMAIL = 0
    self.RES_BMP = 1
    self.RES_JPG = 2
    self.RES_GIF = 3
    self.RES_ZIP = 4
    self.RES_RAR = 5
    #TODO:  implement ALL_PIC
    self.RES_ALL_PIC = 6
    self.RES_LINK = 7
    self.url_content = ''  # text of the page last fetched by check_url()
    self.url = ''          # URL of that page
    self.cur_dir = ''      # its directory, used to absolutize relative links
    self.check_url(url.lower())

  def check_url(self,url,chk_mode=False):
    """ Fetch url and cache its contents.
        chk_mode=False: store text in self.url_content (newlines -> blanks)
        chk_mode=True:  store raw data in self.bin_content (binary resources)
        Returns '' on success, the failed url otherwise.

        URL rule: if dir, like http://www.123.com, should add "/" after
                  else, like http://www.12705.com/group_11_1.html, should not
    """
    url = url.strip('\n')
    if re.findall(r'^http://[a-z0-9\./_@&;%~=?,-]+$',url)==[]:
      print('Warning!  URL %s should like: http://abc.def.efg.***[/]'%url)
    if not 'http://' in url:
      raise IOError('%s is not valid URL!'%url)
    nRetry = self.RETRY
    while nRetry:
      try:
        fUrl = urllib.urlopen(url)
        break
      except IOError:  # urlopen signals connection problems via IOError
        print('Connection error!: %s'%url)
        nRetry -= 1
    else:
      # all retries exhausted
      print('cant get: %s'%url)
      return url
    # succeed!
    if not chk_mode:
      try:
        # join once instead of quadratic string concatenation
        sAll = ''.join(x.replace('\n',' ') for x in fUrl.readlines())
      except Exception:
        print('%s read error!'%url)
        return url
      finally:
        fUrl.close()  # don't leak the connection
      self.url_content = sAll
      self.url = url
      self.cur_dir = self.get_dir(url)
    else:
      try:
        lChunks = []
        sTmp = fUrl.read(256)
        while sTmp!='':
          lChunks.append(sTmp)
          sTmp = fUrl.read(256)
      except Exception:
        print('%s read error!'%url)
        return url
      finally:
        fUrl.close()
      self.bin_content = ''.join(lChunks)
    # return null string if succeed, else return the failed url
    return ''

  def set_depth(self,depth):
    'Set the search depth (must be positive).'
    assert depth>0
    self.depth = depth

  def set_retry(self,retry):
    'Set the retry count (must be positive).'
    assert retry>0
    self.RETRY = retry

  def get_dir(self,url):
    """
      get dir from url, i.e.
      url = http://www.12705.com/12345/group_11_1.html
      return http://www.12705.com/12345/
    """
    assert url.count('/')>2
    if url[-1]=='/':
      return url
    else:
      return url.rsplit('/',1)[0]+'/'

  def _harvest(self,candidates,ptn,match_mode):
    """Shared filter for raw 'href=...'/'src=...' fragments: apply the
       ptn filter, clean quoting, absolutize relative links, drop scripts
       and duplicates.  Returns the resulting URL list (order preserved)."""
    lOut = list()
    for x in candidates:
      if not match_mode:
        if not ptn in x:   # plain substring filter
          continue
      else:
        if re.findall(ptn,x)==[]:  # regex filter
          continue
      sTmp = x.split('=',1)[1].strip(' "<>/')  # get link contents
      if "'" in sTmp: # if invalid chars, skip
        continue
      if not ':' in sTmp: # a relative page, like: href="class.asp?leibie=haha"
        sTmp = self.cur_dir + sTmp
        if not sTmp in lOut:  # cut redundance
          lOut.append(sTmp)
      else:
        if not self.local:  # if only in local, this can skip
          if 'script' in sTmp.split(':')[0]:  # javascript: etc.
            continue
          if not sTmp in lOut:  # cut redundance
            lOut.append(sTmp)
    return lOut

  def spider(self,res_type,ptn='',match_mode=False):
    """ spider resource according to type needed, ptn is pattern to be matched
        match_mode = False: Normal string matching
                   = True:  Regex Matching
        Returns a URL list for RES_JPG / RES_LINK, None for the
        not-yet-implemented types (unchanged behavior).
    """
    sAll = self.url_content.lower() # make sure regex match
    # retrieving LINKs first
    # FIXME: Make sure can cover all the patterns
    lLink = self._harvest(re.findall(r'href\s*=\s*\S+?[ >]',sAll),ptn,match_mode)
    if res_type==self.RES_EMAIL:
      pass
    elif res_type==self.RES_BMP:
      pass
    elif res_type==self.RES_JPG:
      # <img src="http://img3.ppmsg.net/Upload2010/53/OLsfhswxgmt/02.jpg alt="325235">
      return self._harvest(re.findall(r'src\s*=\s*\S+\.jp[e]?g',sAll),ptn,match_mode)
    elif res_type==self.RES_LINK:
      return lLink

  def save_url(self,urlin):
    """Download urlin (check_url in binary mode) into a locally unique file.
       Returns '' on success, urlin on any failure."""
    urlin = urlin.strip('\n')
    if self.check_url(urlin,chk_mode=True)!='':
      print('cant get %s!'%urlin)
      return urlin
    sFileName = self.change_name(urlin)
    # sniff at most the first KB: an error page usually mentions 'html'
    sTest = self.bin_content[:1024]
    if 'html' in sTest.lower():  # FIXME: may not be so accurate
      #block a page not found
      print('%s is not valid!'%urlin)
      return urlin
    try:
      fSav = open(sFileName,'wb')
      try:
        fSav.write(self.bin_content)
      finally:
        fSav.close()
    except (IOError,OSError):
      print('cant write file %s, may be because of maxium files reached!!'%sFileName)
      return urlin
    return ''

  def change_name(self,url):
    'change a unique name for a same set of resources'
    url = url.replace('?','.')
    # hashlib replaces the deprecated md5 module; same digest for ascii URLs
    return hashlib.md5(op.split(url)[0].encode('utf-8')).hexdigest()+'_'+op.split(url)[1]


class Spider(Larva):
  'Threading Spider: crawl a list of URLs on a background thread.'

  def __init__(self,url_list,out_file,depth=3,retry=3,local=False):
    """ url_list: URLs to spider (the first one is fetched immediately
                  through Larva.__init__)
        out_file: file that collects the resources found
        depth/retry/local are forwarded to Larva
    """
    self.url_list = url_list  # url list to spider
    self.running = 0  # 0 = init, 1 = running, 2 = finished
    self.out_file = out_file
    Larva.__init__(self,url_list[0],depth,retry,local)

  def spider_wrapper(self,res_type,ptn='',match_mode=False):
    """Run spider() over every URL in url_list, writing results to
       out_file and unreachable URLs to 'fails_<out_file>'."""
    fOut = open(self.out_file,'w')
    # start each run with a clean failure log
    if os.access('fails_'+self.out_file,os.F_OK):
      os.remove('fails_'+self.out_file)
    self.progress = 0
    for x in self.url_list:
      sFail = self.check_url(x)
      if sFail!='':
        fFails = open('fails_'+self.out_file,'a')
        fFails.write(sFail+'\n')
        fFails.close()
        self.progress += 1
        continue
      for res in self.spider(res_type,ptn,match_mode):
        fOut.write(res+'\n')
      self.progress += 1
      time.sleep(2)  # be polite to the server between pages
    fOut.close()
    self.running = 2  # finish state

  # FIXME: use mutex here!!!
  def threading_spider(self,res_type,ptn='',match_mode=False):
    'start thread with spider wrapper'
    if self.running==0:
      self.running = 1  # running state
      thread.start_new_thread(self.spider_wrapper,(res_type,ptn,match_mode))
    else:
      # string exceptions are not raisable; use a real exception type
      raise RuntimeError('the thread is running!')

  def query_status(self):
    'Return the thread state: 0 init, 1 running, 2 finished.'
    return self.running

  def query_progress(self):
    'Return the percentage of URLs processed so far.'
    if self.progress>len(self.url_list):
      return 100.0
    else:
      return 100.0*self.progress/len(self.url_list)

class Ant(Larva):
  'Threading Save Url: download every URL in url_list on a background thread.'

  def __init__(self,url_list,fail_name='ant_fails',retry=4,resume=0):
    """ url_list:  URLs to download
        fail_name: file that collects the URLs which failed
        retry:     retry times per URL
        resume:    index to resume from (entries before it are skipped)
        NOTE: the download thread is started from the constructor.
    """
    self.running = 0  # 0 = init, 1 = running, 2 = finished
    self.url_list = url_list  # url list to save
    self.fail_name = fail_name
    if resume>=len(url_list):
      # nothing left to do (also covers an empty url_list)
      self.resume = len(url_list)
      self.progress = self.resume
      self.running = 2  # finished state
    else:
      self.resume = resume
      Larva.__init__(self,url_list[0],1,retry,True)
      # threading the save thread
      self.running = 1  # running state
      thread.start_new_thread(self.save_wrapper,())

  # FIXME: use mutex here!!!
  def save_wrapper(self):
    'wrapper for save_url: download the remaining URLs, log failures'
    self.progress = self.resume
    for x in self.url_list[self.resume:]:
      url_fail = self.save_url(x)
      if url_fail!='':
        fFail = open(self.fail_name,'a')
        fFail.write(url_fail+'\n')
        fFail.close()
      self.progress += 1
    self.running = 2  # finished state

  def query_status(self):
    'Return the thread state: 0 init, 1 running, 2 finished.'
    return self.running

  def query_progress(self,percent=True):
    'Return progress as a percentage (default) or as a raw count.'
    if percent:
      if not self.url_list:
        # empty job: report complete instead of ZeroDivisionError
        return 100.0
      if self.progress>len(self.url_list):
        return 100.0
      else:
        return 100.0*self.progress/len(self.url_list)
    else:
      return self.progress

def all_round(flt):
  """Round a non-negative float up to the next integer:
     4 for 3.12, 5 for 4.8, 6 for 6.0."""
  # isinstance replaces the removed types.FloatType; keep the assert contract
  assert flt>=0 and isinstance(flt,float)
  return int(math.ceil(flt))

def wait(insect):
  'Block until a Spider/Ant thread reports the finished state (2).'
  while True:
    if insect.query_status() == 2:
      break
    time.sleep(3)
  # print insect.thread_id
  print('insect died.')
