'''
Eclipse update-site plugin downloader.

Reads the p2 metadata at site/content.jar -> content.xml, whose relevant
structure is:

category
  unit[@org.eclipse.equinox.p2.type.category=true]
    requires
      required
'''
import urllib2
from xml.dom import minidom
import zipfile
import re
import os
import sys
import getopt
class EclipsePluginDownloader():
  def __init__(self,site=None):
    self.site=site

  def get_peroperty(self,u,k):
    return u.has_key(k) and u[k] or ''
  def cacl_url(self):
    self.bk_urls()
    self.save_file('%(site)s/content.jar'%{'site':self.site})
    with open('content.xml','w') as f:
      z = zipfile.ZipFile('content.jar','r')
      f.write(z.read('content.xml'))
      z.close()
    self.save_artifacts()
    doc= minidom.parse('content.xml')
    us=[]
    for ue in doc.getElementsByTagName('unit'):
      us.append({'id':ue.attributes['id'].value,'ps':{},'rs':[]})
      for pe in ue.getElementsByTagName('property'):
        us[-1]['ps'][pe.attributes['name'].value]=pe.attributes['value'].value
      for re in ue.getElementsByTagName('required'):
        us[-1]['rs'].append({'name':re.attributes['name'].value,'range':re.attributes['range'].value})

    us_map=dict(map(lambda u:(u['id'],u),us))
    ck='org.eclipse.equinox.p2.type.category'
    cs=[u for u in us if u['ps'].has_key(ck) and u['ps'][ck] == 'true']
    gk='org.eclipse.equinox.p2.type.group'
    gs=[u for u in us if u['ps'].has_key(gk) and u['ps'][gk] == 'true']
    


    ds=[]
    for c in cs:
      print c['ps']['org.eclipse.equinox.p2.name']
      for r in c['rs']:
        if r['name'] in us_map.keys():
          ds.append(us_map[r['name']])
          print '  ',len(ds)-1,ds[-1]['ps']['df_LT.featureName']

    print 'Uncategorized'
    for u in [u for u in gs if u not in ds]:
      ds.append(u)
      print '  ',len(ds)-1,ds[-1]['ps']['df_LT.featureName']
  
    print '''
input your select number split by comma
example:0,1,2,3
    '''
    
    
    doc= minidom.parse('artifacts.xml')
    rules=[]
    for rule in doc.getElementsByTagName('rule'):
      rules.append(rule.attributes)

    artifacts=[]
    for artifact in doc.getElementsByTagName('artifact'):
      artifacts.append({'id':artifact.attributes['id'].value,'output':self.get_output(rules,artifact)})
    
    for s in raw_input().split(','):
      self.download_unit(ds[int(s)],us_map,artifacts)
    

  def download_unit(self,unit,us_map,artifacts):
    for r in unit['rs']:
        if us_map.has_key(r['name']):
          self.download_unit(us_map[r['name']],us_map,artifacts)  
    for a in artifacts:
      if a['id']== unit['id']:
        self.save_file(a['output'],True);


   

  def get_output(self,rules,artifact):
    for rule in rules:
      match=True
      for r in map(lambda x:x.split('='),re.findall('\(([^\)\(]*\=[^\)\(]*)\)',rule['filter'].value)):
        if (not artifact.attributes.has_key(r[0])) or artifact.attributes[r[0]].value!=r[1]:
          match=False
          break
      if match:
        return  re.sub('\${([^}]*)}',r'%(\1)s',rule['output'].value)%{'repoUrl':self.site,'id':artifact.attributes['id'].value,'version':artifact.attributes['version'].value}

    
  def save_artifacts(self):
    self.save_file('%(site)s/artifacts.jar'%{'site':self.site})
    with open('artifacts.xml','w') as f:
      z = zipfile.ZipFile('artifacts.jar','r')
      f.write(z.read('artifacts.xml'))
      z.close()


  

  def save_url(self,url):
    with open('urls.txt','a') as urls:
      urls.write(url)
      urls.write('\n')

  def save_file(self,url,onlySaveURL=False,log=True):
    print 'downloading:%(url)s'%{'url':url}
    if onlySaveURL:
      if log: self.save_url(url)
      return
    else:
      if log:self.save_url('#'+url)
    file_path=url[len(self.site)+1:]
    dir ="/".join(file_path.split('/')[:-1])
    if dir and not os.path.exists(dir):
     os.makedirs(dir)
    with open(file_path,'wb') as local_file:
      web_file=self.get_opener().open(url)
      local_file.write(web_file.read())
      web_file.close()

  def get_opener(self):
    return urllib2.build_opener(urllib2.ProxyHandler({'http': 'http://172.28.9.151:8080/'}))

  def download(self):
    with open('urls.txt','r') as urls:
      while 1:
        line = urls.readline()       # read line by line
        if not line: break
        if not line.startswith('#'):
          self.save_file(line.strip(),False,False)

  def bk_urls(self):
    self.bk_file('urls.txt')
  def bk_file(self,file):
    if os.path.exists(file):
      m = re.match(r'(?P<file>.*)\.bk(?P<index>\d+)',file)
      index = m and int(m.group('index'))+1 or 1
      ofile = m and m.group('file') or file
      bk_file = ofile+'.bk'+str(index)
      self.bk_file(bk_file)
      os.rename(file,bk_file)
      










class EclipsePluginDownloader2():
  def __init__(self,site=None):
    self.site=site
  def cacl_url(self):
    self.bk_urls()
    site_xml=self.get_opener().open('%(site)s/site.xml'%{'site':self.site}).read()
    doc= minidom.parseString(site_xml)
    fs=[]
    for feature in doc.getElementsByTagName('feature'):
      fs.append({'url':feature.attributes['url'].value})
      for category in feature.getElementsByTagName('category'):
        fs[-1]['category']=category.attributes['name'].value
      self.save_file('%(site)s/%(feature_url)s'%{'site':self.site,'feature_url':feature.attributes['url'].value})
      z = zipfile.ZipFile(feature.attributes['url'].value,'r')
      content=z.read('feature.xml')
      doc = minidom.parseString(content)
      feature_name = doc.documentElement.attributes['label'].value
      if feature_name.startswith('%'):
        feature_name=feature_name[1:]
        print feature_name
        content=z.read('feature.properties')
        feature_name=re.search('[\r\n]*featureName=(?P<feature_name>[^\r\n]*)[\r\n$]*',content).group('feature_name')
      fs[-1]['feature_name']=feature_name or  'Uncategorized'
      z.close()
      
    cs=set([f['category'] for f in fs])
    df=[]
    for c in cs:
      for f in fs:
        if f['category'] == c:
          df.append(f)
          print '  ',len(df)-1,df[-1]['feature_name']
    print '''
input your select number split by comma
example:0,1,2,3
    '''
    for s in raw_input().split(','):
      self.download_by_feature(df[int(s)])

  def download_by_feature(self,feature):
    z = zipfile.ZipFile(feature['url'],'r')
    content=z.read('feature.xml')
    z.close()
    doc= minidom.parseString(content)
    for p in doc.getElementsByTagName('plugin'):
      self.save_file('%(site)s/plugins/%(id)s_%(version)s.jar'%{'site':self.site,'id':p.attributes['id'].value,'version':p.attributes['version'].value},True)

  def save_url(self,url):
    with open('urls.txt','a') as urls:
      urls.write(url)
      urls.write('\n')

  def save_file(self,url,onlySaveURL=False,log=True):
    print 'downloading:%(url)s'%{'url':url}
    if onlySaveURL:
      if log: self.save_url(url)
      return
    else:
      if log:self.save_url('#'+url)
    file_path=url[len(self.site)+1:]
    dir ="/".join(file_path.split('/')[:-1])
    if dir and not os.path.exists(dir):
     os.makedirs(dir)
    with open(file_path,'wb') as local_file:
      web_file=self.get_opener().open(url)
      local_file.write(web_file.read())
      web_file.close()

  def get_opener(self):
    return urllib2.build_opener(urllib2.ProxyHandler({'http': 'http://172.28.9.151:8080/'}))

  def download(self):
    with open('urls.txt','r') as urls:
      while 1:
        line = urls.readline()       # read line by line
        if not line: break
        if not line.startswith('#'):
          self.save_file(line.strip(),False,False)

  def bk_urls(self):
    self.bk_file('urls.txt')
  def bk_file(self,file):
    if os.path.exists(file):
      m = re.match(r'(?P<file>.*)\.bk(?P<index>\d+)',file)
      index = m and int(m.group('index'))+1 or 1
      ofile = m and m.group('file') or file
      bk_file = ofile+'.bk'+str(index)
      self.bk_file(bk_file)
      os.rename(file,bk_file)



class Epd():
  def execute(self):
    usage='''usage: epd.py site
e:epd.py --calc --download --site http://dl-prcsat.google.com/eclipse/plugin/3.4
e:epd.py --calc --site http://dl-prcsat.google.com/eclipse/plugin/3.4 
e:epd.py --download --site http://dl-prcsat.google.com/eclipse/plugin/3.4 
'''
    try:
      opts, args = getopt.getopt(sys.argv[1:], '', ['calc','download','site='])
      opts_dict=dict(opts)
      if not opts_dict.has_key('--site'):
        print usage
      elif not opts_dict.has_key('--calc') and not opts_dict.has_key('--download'):
        print usage
      else:
        try:
          if opts_dict.has_key('--calc'): EclipsePluginDownloader(opts_dict['--site']).cacl_url()
          if opts_dict.has_key('--download'):EclipsePluginDownloader(opts_dict['--site']).download()
        except:
          if opts_dict.has_key('--calc'):EclipsePluginDownloader2(opts_dict['--site']).cacl_url()
          if opts_dict.has_key('--download'):EclipsePluginDownloader(opts_dict['--site']).download()
    except getopt.GetoptError:
      print usage


  

# Script entry point: parse CLI options and run the selected downloader.
if __name__=='__main__':
  Epd().execute()



