#!/usr/bin/python
#
# Copyright 2014 LeTV Inc. All Rights Reserved.
__author__ = 'guoxiaohe@letv.com'

# NOTE: UrlNormalize was apparently once a singleton (see the commented-out
# init_onece hook below); it is a plain class now.
import re
import urlparse
import traceback

# from scrapy.settings import CrawlerSettings

"""
for video searching, we need distinct one video page by it's url
but, some unnecessary include some param, but some need:
eg: http://a.b.c/test.html?vid=12444&other=vaddfs#frag
eg: http://a.b.c/test.html?sourceid=234&vid=12444&other=vaddfs#frag
"""

# Maps a site id to the url regexes that recognize its video pages;
# the first regex that matches wins (see UrlNormalize.__accept_reg).

ID_MAPPING_REG = {
  "youku.com": [r'http://v\.youku\.com\/v_show'],
  "pptv.com": [r'http://v\.pptv\.com'],
  "funshion.com": [r'http://www\.fun\.tv'],
}

# Query parameter names to DROP per site id: any key listed here is
# removed from the url's query string; every other parameter is kept.
# NOTE(review): an older comment described entries as
# (key, keep_empty) tuples, but the values are plain key names --
# the tuple format was never implemented.
STRIP_QUERY = {
  "youku.com": ['from', 'f', 'o'],
  "pptv.com": ['rcc_src'],
  "funshion.com": ['alliance'],
  "default": [],
}

# Whether to keep the url fragment (#...) per site id.
# define format: 'domain': True | False
KEEP_FRAGEMENT = {
  "default": False,
}

# Extra query parameters to append per site id.
# define format: 'domain': {key1: value1, key2: value2}
ADD_EXTRA_PARA = {}


class UrlNormalize():
  """Normalize video page urls so one page always maps to one url.

  Per-domain rules come from the module-level config dicts
  (ID_MAPPING_REG, STRIP_QUERY, KEEP_FRAGEMENT, ADD_EXTRA_PARA): they
  decide which query parameters are stripped, whether the fragment is
  kept, and which extra parameters are appended.
  """

  # def init_onece(self, *args, **kwargs):
  def __init__(self):
    self.__load_settings()

  def __convert_map_regs(self, regmap):
    """Compile {id: [pattern, ...]} into {id: [compiled regex, ...]}.

    Patterns compile case-insensitively; ids with an empty pattern
    list are dropped.  Returns {} for a falsy regmap.
    """
    if not regmap:
      return {}
    compiled = {}
    for (domain_id, patterns) in regmap.items():
      regs = [re.compile(p, re.IGNORECASE) for p in patterns]
      if regs:
        compiled[domain_id] = regs
    return compiled

  def __accept_reg(self, id_reglist, item):
    """Return the first id whose regex list matches item, else None."""
    if not item:
      return None
    for (domain_id, regs) in id_reglist.items():
      for reg in regs:
        if reg.search(item):
          return domain_id
    return None

  def __get_strip_para_list(self, mapdict, domain_id):
    """Return mapdict[domain_id], or [] when the id is not configured."""
    # dict.get() replaces the deprecated dict.has_key().
    return mapdict.get(domain_id, [])

  def __get_strip_query_lst(self, domain_id):
    """Query-parameter names to strip for this domain id."""
    return self.__get_strip_para_list(self.__strip_query, domain_id)

  def __keep_fragment(self, domain_id):
    """Whether the url fragment should be kept for this domain id."""
    if not self.__keep_fragments:
      return False
    return self.__keep_fragments.get(domain_id, False)

  def __update_paras_with_extra(self, input_dict, domain_id):
    """Merge the domain's configured extra parameters into input_dict.

    Mutates input_dict in place.  Fix: always returns input_dict (the
    original returned None after a successful update, input_dict
    otherwise).
    """
    if domain_id in self.__extra_para:
      input_dict.update(self.__extra_para[domain_id])
    return input_dict

  def get_mapping_id(self, url=None):
    """Return the configured domain id whose regexes match url, or None."""
    return self.__accept_reg(self.__id_mapping_reg, url)

  def __set_query_dict(self, org_dict, domain_id):
    """Copy org_dict minus the keys configured to be stripped."""
    if org_dict is None or domain_id is None:
      return {}
    strip_keys = self.__get_strip_query_lst(domain_id)
    return dict((k, v) for (k, v) in org_dict.items() if k not in strip_keys)

  def __join_query(self, inputd):
    """Rebuild a query string from a parse_qs-style dict.

    Keys are sorted in reverse order so the result is canonical.
    NOTE(review): only the first value of each key survives (v[0]) --
    repeated parameters are collapsed, and values are assumed to be
    non-empty lists as produced by urlparse.parse_qs.
    """
    if not inputd:
      return ''
    pairs = sorted(inputd.items(), key=lambda kv: kv[0], reverse=True)
    return '&'.join(['%s=%s' % (k, v[0]) for (k, v) in pairs])

  def __load_settings(self):
    """Load the module-level per-domain configuration."""
    self.__id_mapping_reg = self.__convert_map_regs(ID_MAPPING_REG)
    self.__strip_query = STRIP_QUERY
    self.__keep_fragments = KEEP_FRAGEMENT
    self.__extra_para = ADD_EXTRA_PARA

  def get_unique_url(self,
                     url,
                     scheme=None,
                     netloc=None,
                     domain=None,
                     no_conf_no_oper=False):
    """Return the canonical form of url.

    scheme / netloc are fallbacks used only when the url itself lacks
    them.  domain forces a config id instead of regex matching.  When
    no_conf_no_oper is True, an unrecognized url is returned untouched
    instead of being normalized with the 'default' config.  On any
    error the traceback is printed and the original url is returned.
    """
    try:
      domain_id = domain or self.get_mapping_id(url=url)
      if domain_id is None:
        if no_conf_no_oper:
          return url
        domain_id = 'default'
      if domain_id is None or url is None:
        raise Exception('Failed get mapping id for: %s, %s' % (domain, url))
      urlp = urlparse.urlsplit(url.strip(),
                               allow_fragments=self.__keep_fragment(domain_id))
      if not urlp:
        raise Exception('Failed convert urlparse %s' % url)
      nscheme = urlp.scheme or scheme
      nnetloc = urlp.netloc or netloc
      qdict = urlparse.parse_qs(urlp.query)
      fqdict = self.__set_query_dict(qdict, domain_id)
      self.__update_paras_with_extra(fqdict, domain_id)
      nquery = self.__join_query(fqdict)
      ret_url = urlparse.urlunsplit((nscheme, nnetloc, urlp.path, nquery,
                                     urlp.fragment)).strip()
    except Exception:
      # Narrowed from a bare except so SystemExit/KeyboardInterrupt
      # propagate; parenthesized print is valid on py2 and py3 alike.
      print(traceback.format_exc())
      return url
    return ret_url


if __name__ == '__main__':
  un = UrlNormalize()
  print un.get_unique_url('http://google.com/test.html;12445#fadk?k1=v1&k2=v2')
  print un.get_unique_url('http://google.com/test.html;12445#fadk?k1=v1&k2=v2',
                          no_conf_no_oper=True)
  print un.get_unique_url('http://v.qq.com/cover/v/v0a5d3mvfs59t8q.html?vid=w0015xnrgrg')
  print un.get_unique_url('http://v.qq.com/cover/v/v0a5d3mvfs59t8q.html?vid=w0015xnrgrg&nonokey=nonvalue')
  print un.get_unique_url('http://v.qq.com/cover/v/v0a5d3mvfs59t8q.html?id=w0015xnrgrg&nonokey=nonvalue')
  print un.get_unique_url('http://v.qq.com/cover/v/v0a5d3mvfs59t8q.html?nonokey=nonvalue&vid=w0015xnrgrg')
  print un.get_unique_url('http://v.youku.com/v_show/id_XOTQ0MzQ0MTI4.html?from=s1.833.1')
  print un.get_unique_url('http://v.pptv.com/show/hdXLSeZYMWicSULg.html?rcc_src=B3')
  print un.get_unique_url('http://vod.kankan.com/v/89/89682.shtml?subid=493340')
  print un.get_unique_url('http://www.wasu.cn/Play/show/id/7016942?refer=letv.com')
  print un.get_unique_url('http://v.youku.com/v_show/id_XOTE3NjY3Nzg0.html?from=s1.833.1')
