import sys
import os
import re
import glob
import urllib
import urllib.request
import urllib.error
import zipfile
import logging
import logging.handlers

from io  import BytesIO
from bs4 import BeautifulSoup



class SeriesFileDescr:
  """Describes a series video/subtitle file parsed from its filename.

  Attributes:
    filenameNoExt -- filename without extension, e.g. "House.S07E01.HDTV.XviD-LOL"
    name          -- series-name part, e.g. "House"
    season        -- season number as a string, e.g. "07"
    episode       -- episode number as a string, e.g. "01"
    subtVersion   -- release/version tag, e.g. "HDTV.XviD-LOL" ("unknown" if not given)
    ext           -- file extension ("unknown" if not given)
  """

  def __init__(self, filenameNoExt, name, season, episode, subtVersion="unknown", ext="unknown"):
    self.filenameNoExt = filenameNoExt
    self.name = name
    self.season = season
    self.episode = episode
    self.subtVersion = subtVersion
    self.ext = ext

  def matchingVersions(self, other):
    """Return True when either file's version tag occurs in the other's filename.

    Fix: an empty version tag used to match every filename, because
    str.find("") returns 0 for any string.  Empty tags now never match.
    """
    if other.subtVersion and self.filenameNoExt.find(other.subtVersion) >= 0:
      return True
    if self.subtVersion and other.filenameNoExt.find(self.subtVersion) >= 0:
      return True
    return False
#end class SeriesFileDescr

my_logger = 1


def init_logging():
  """Configure the global ``my_logger``: console output via basicConfig plus
  a timed-rotating file handler writing to SubDown.log (20 backups kept).

  Fix: the file handler previously got no formatter (the old "create
  formatter" comment was never acted on), so file records lacked the
  timestamp/name/level prefix; the shared format is now applied to it too.
  """
  global my_logger
  LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

  # Installs a console handler on the root logger and sets the DEBUG level.
  logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)

  my_logger = logging.getLogger('')

  # Rotating file log next to the script; old files kept up to backupCount.
  handler = logging.handlers.TimedRotatingFileHandler("SubDown.log", backupCount=20)
  handler.setFormatter(logging.Formatter(LOG_FORMAT))
  my_logger.addHandler(handler)


def get_page_soup(url):
    """Fetch *url* and return it parsed as a BeautifulSoup document.

    On an HTTP error the failure is logged and an empty document is
    returned, so callers can iterate the (empty) soup without
    special-casing failures.
    """
    # Browser-like User-Agent: some sites reject bare urllib requests.
    headers = { "User-Agent" : "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)" }
    request = urllib.request.Request(url, headers=headers)

    data = ""
    try:
      response = urllib.request.urlopen(request)
      data = response.read()
    except urllib.error.HTTPError as e:
      my_logger.warning(" Cannot navigate url: '%s'" % url)
      my_logger.warning(" Reason: '%s'" % e)

    # Fix: name the parser explicitly -- bs4 warns (and may pick different
    # parsers on different machines) when none is given.
    soup = BeautifulSoup(data, "html.parser")
    return soup

def download_subtitle(avifilename):
    """Try to download a Hebrew subtitle for one video file.

    Parses season/episode from the filename, queries the matching
    subscenter.org episode page, scans its <script> tags for the embedded
    ``subtitles_groups`` dictionary, and when a Hebrew entry's
    ``subtitle_version`` matches this file's release tag, hands the
    download URL to dl_sub().  Returns None on any parse failure.
    """
    my_logger.info("Video filename='%s'," % avifilename)
    path, filename = os.path.split(avifilename)
#    print "=== " + filename + " === "
    fileNoExt = os.path.splitext(filename)[0]
    fileExt   = os.path.splitext(filename)[1]
    
    # Parse "Name.SxxEyy.version" out of the filename.
    videoSerDF = extractSeasonEpisode(fileNoExt)

    if not videoSerDF:
      my_logger.warning("could not parse file '%s'" % avifilename)
      return None

    seriesNameToQuery = prepareSeriesName(videoSerDF.name)

    #my_logger.info("=== serName='%s',  serSeason='%s',  serEpisode='%s',  seriesNameToQuery='%s', serVersion='%s'," % (serName, serSeason, serEpisode, seriesNameToQuery, serVersion))
    
    # Episode page listing all available subtitle versions.
    epUrl = "http://www.subscenter.org/he/subtitle/series/%s/%s/%s/" % (seriesNameToQuery, videoSerDF.season, videoSerDF.episode)

    my_logger.debug(" quering url '%s' ..." % epUrl)
    soup = get_page_soup(epUrl)

    cnt = 1   # NOTE(review): incremented below but never read -- looks like a debug leftover
    subtitleUrl = None

    for scriptHtmlObj in soup.findAll("script"):

# searching for the following , under <script>
#        subtitles_groups = {"en": {"1.Private Translated": {"ALL": {"4": {"created_by": "mail4junk", "created_on": "21.09.2010 ,08:51", "credits": {"2": {"\u05e1\u05e0\u05db\u05e8\u05d5\u05df": "addic7ed.com"}, "8": {"\u05e7\u05e8\u05d9\u05e2\u05d4": "addic7ed.com"}}, "downloaded": 482, "hearing_impaired": 0, "id": 65186, "is_sync": 0, "notes": "", "subtitle_version": "LOL"}}}}, "he": {"2.Qsubs": {"hdtv": {"1": {"created_by": "Qsubs", "created_on": "22.09.2010 ,02:19", "credits": {"1": {"\u05ea\u05e8\u05d2\u05d5\u05dd": "\u05d0\u05dc\u05db\u05e1\u05e0\u05d3\u05e8 \u05e4\u05df \u05d5-thebarak "}, "2": {"\u05e1\u05e0\u05db\u05e8\u05d5\u05df": "ZIPC "}}, "downloaded": 605, "hearing_impaired": 0, "id": 65240, "is_sync": 0, "notes": "\u05d7\u05d2 \u05e9\u05de\u05d7!", "subtitle_version": "House.S07E01.480p.HDTV.XviD.AC3-TiMPE"}, "2": {"created_by": "Qsubs", "created_on": "22.09.2010 ,01:13", "credits": {"1": {"\u05ea\u05e8\u05d2\u05d5\u05dd": "\u05d0\u05dc\u05db\u05e1\u05e0\u05d3\u05e8 \u05e4\u05df \u05d5-thebarak "}, "2": {"\u05e1\u05e0\u05db\u05e8\u05d5\u05df": "ZIPC "}}, "downloaded": 2737, "hearing_impaired": 0, "id": 65238, "is_sync": 0, "notes": "\u05d7\u05d2 \u05e9\u05de\u05d7!", "subtitle_version": "House.S07E01.HDTV.XviD-LOL"}, "3": {"created_by": "Qsubs", "created_on": "22.09.2010 ,01:13", "credits": {"1": {"\u05ea\u05e8\u05d2\u05d5\u05dd": "\u05d0\u05dc\u05db\u05e1\u05e0\u05d3\u05e8 \u05e4\u05df \u05d5-thebarak "}, "2": {"\u05e1\u05e0\u05db\u05e8\u05d5\u05df": "ZIPC "}}, "downloaded": 1083, "hearing_impaired": 0, "id": 65238, "is_sync": 0, "notes": "\u05d7\u05d2 \u05e9\u05de\u05d7!", "subtitle_version": "House.S07E01.720p.HDTV.X264-DIMENSION"}}}}}
#        subtitles_groups = {'he': {'1.Qsubs': {'hdtv': {'1': {'credits': {'1': {'\\u05ea\\u05e8\\u05d2\\u05d5\\u05dd': 'Shai10 \\u05d5-Godfather '}, '10': {'\\u05e1\\u05e0\\u05db\\u05e8\\u05d5\\u05df \\u05dc\\u05d2\\u05e8\\u05e1\\u05d4 \\u05d6\\u05d5': "ponkoit <span class='text_field' dir='ltr' >(Qsubs)</span>"}}, 'created_on': '20.10.2008 ,12:28', 'hearing_impaired': 0, 'downloaded': 103, 'is_sync': 0, 'notes': '', 'subtitle_version': 'The.Office.S05E03.720p.HDTV.X264-DIMENSION', 'id': 22596, 'created_by': 'HaiZone'}, '2': {'credits': {'1': {'\\u05ea\\u05e8\\u05d2\\u05d5\\u05dd': 'Shai10 \\u05d5-Godfather '}}, 'created_on': '20.10.2008 ,12:27', 'hearing_impaired': 0, 'downloaded': 311, 'is_sync': 0, 'notes': '', 'subtitle_version': 'The.Office.S05E03.HDTV.XviD-LOL', 'id': 22595, 'created_by': 'HaiZone'}}}}}
      cnt+=1
      scriptContent = "%s" % scriptHtmlObj
      
      # Grab the dict literal that follows "subtitles_groups" (non-greedy up
      # to the closing run of braces seen in the samples above).
      nmatches = re.search("subtitles_groups.+?({.*}}}}})", scriptContent)
      if not nmatches:
#          print "2not f"
        dummy=2
      else:
        versionsStrDict = nmatches.group(1)
#          print "2found!"
#          print versionsStrDict
        
        # NOTE(security): eval() executes the page-embedded dict as Python --
        # a malicious or altered page could run arbitrary code here.  The
        # second sample above uses single-quoted (Python-style) dicts, which
        # json.loads cannot parse; ast.literal_eval would be a safer drop-in.
        # Also: the bare except below swallows every error type (logged, then
        # the scan is abandoned).
        try:
          allSubsGroupsDict = eval(versionsStrDict)
        except:
          my_logger.warning("cannot eval '%s'" % versionsStrDict)
          my_logger.warning("reason : %s" % sys.exc_info()[0])
          break

        #print "after  eval: %s" % allSubsGroupsDict
        
#        if allSubsGroupsDict.has_key('he'):
        if 'he' in allSubsGroupsDict:
          hebSubsDict = allSubsGroupsDict['he']  
#          print ('AA %s' % hebSubsDict)
#          secLevel = hebSubsDict.values()[0]
          # Drill through the group/quality nesting (see samples above);
          # only the FIRST group and first quality bucket are considered.
          secLevel = list(hebSubsDict.values())[0]
          #print "secLevel: ", secLevel
          thrdLevel = list(secLevel.values())[0]
          #print "thrdLevel: ", thrdLevel
          versDictList = thrdLevel.values()
          #print "versDictList: ", versDictList
          
          for currVerDict in versDictList:
            #print "currVerDict : ", currVerDict
            #print "  subtitle_version=", currVerDict.get('subtitle_version')
            
            #  
            subtFname = currVerDict.get('subtitle_version')
            
            subtSerDF = extractSeasonEpisode(subtFname);
            if not subtSerDF:
              my_logger.warning("could not parse subtitle version '%s'" % subtFname)
              return None
            
            # testing the filename with version
            #if filename.startswith(subtFname):
            if videoSerDF.matchingVersions(subtSerDF):
              currSubtId = currVerDict.get('id')
              # NOTE(review): 'key' does not appear in the sample dumps above,
              # so this may be None -- confirm against a live page.
              currSubtKey = currVerDict.get('key')
              my_logger.info("!!!version matched!!! (%s), id is '%s', key is '%s'" % (subtFname, currSubtId, currSubtKey))
              
              # building the URL
              # http://www.subscenter.org/subtitle/download/he/73402/?v=Glee.S02E15.720p.HDTV.X264-DIMENSION
#             subtitleUrl = "subtitle/download/he/%s/?v=%s" % (currSubtId, subtFname)
              subtitleUrl = "he/subtitle/download/he/%s/?v=%s&key=%s" % (currSubtId, subtFname, currSubtKey)
              dl_sub(subtitleUrl, path, fileNoExt)
              break
        else:
          my_logger.info("No Hebrew translations")
        #the script found, can stop iter
        break
          
    #end-for

def dl_sub(subtitleUrl, path, fileNoExt):
  """Download the subtitle zip at *subtitleUrl* (site-relative path) and
  extract its first .srt member to <path>/<fileNoExt>.heb.srt.

  Returns None on any failure; failures are logged.
  Fixes:
    * sends the same browser User-Agent as get_page_soup, and logs download
      errors through my_logger instead of print() (consistency);
    * the output file is created only when an .srt member actually exists,
      instead of leaving an empty .heb.srt behind;
    * uses the modern zipfile.BadZipFile name (BadZipfile is a legacy alias).
  """
  # Example: http://www.subscenter.org/subtitle/download/he/73402/?v=Glee.S02E15.HDTV.XviD-LOL
  zipurl = "http://www.subscenter.org/" + subtitleUrl
  my_logger.info(" downloading '%s'..." % subtitleUrl)

  headers = { "User-Agent" : "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)" }
  request = urllib.request.Request(zipurl, headers=headers)

  try:
    zipdata = urllib.request.urlopen(request).read()
  except urllib.error.HTTPError as e:
    my_logger.warning(" cannot download url: '%s'" % zipurl)
    my_logger.warning(" Reason: '%s'" % e)
    return None

  try:
    zipf = zipfile.ZipFile(BytesIO(zipdata), "r")
  except zipfile.BadZipFile as e:
    my_logger.error("cannot open read zip. reason : '%s'" % e)
    return None

  for zipped in zipf.namelist():
    if zipped.endswith(".srt"):
      # Write the raw zipped bytes unchanged (no re-encoding / EOL fixing).
      with open(os.path.join(path, fileNoExt + ".heb.srt"), "wb") as sub:
        sub.write(zipf.read(zipped))
      break


################################################################################
#
# rootDir            - where to collect files from. for example : c:\Videos
# includedExtensions - list of extensions to collect. extension for example: ".avi", ".mkv"
#
################################################################################
def collect_video_files(rootDir, includedExtensions):
  """Recursively gather absolute paths of files under *rootDir* whose name
  ends with one of *includedExtensions* (e.g. ".avi", ".mkv")."""
  wanted = tuple(includedExtensions)
  collected = []
  for currentDir, _subdirs, names in os.walk(rootDir):
    for name in names:
      if name.endswith(wanted):
        fullPath = os.path.join(currentDir, name)
        collected.append(fullPath)
        my_logger.info(" collecting file: '%s'..." % fullPath)
  return collected

################################################################################
#
# absFiles       - source list of files. 
# twinExtensions - twin extensions. extension for example: ".abc", ".heb.srt"
#
# Logic:     
#  walking on files, if find "twin file"(same file name with with twin extension)
#  this file will be excluded and not returned.
#  for example:
#    file    : "c:\abc\moshe.txt", "c:\def\david.avi"
#    twinExt : ".sub"
#
#   if find file    "c:\abc\moshe.sub"
#   it will exclude "c:\abc\moshe.txt"  
#  
################################################################################
def exclude_files(absFiles, twinExtensions):
  """Return the entries of *absFiles* that have no "twin" file on disk.

  A twin is a file in the same directory sharing the stem but carrying one
  of *twinExtensions* (e.g. video.avi is dropped when video.heb.srt exists).
  """
  kept = []
  for absFile in absFiles:
    directory, baseName = os.path.split(absFile)
    stem = os.path.splitext(baseName)[0]

    # os.path.isfile implies existence, so one check per candidate suffices.
    hasTwin = any(
      os.path.isfile(os.path.join(directory, stem + twinExt))
      for twinExt in twinExtensions
    )

    if not hasTwin:
      kept.append(absFile)
      my_logger.info(" including for search: '%s'..." % absFile)

  return kept

################################
#
# input: 
#   shortFileName - with no extension!!!
#
# return
#   p1: seriesname
#   p2: season
#   p3: episode
#   p4: fileVersion, for example HDTV.XviD-LOL
################################
def extractSeasonEpisode(shortFileName):
  """Parse a series filename WITHOUT extension, e.g. "House.S07E01.HDTV.XviD-LOL".

  Returns a SeriesFileDescr carrying (name, season, episode, version tag),
  or None when the name has no SxxEyy / Sxx.Eyy / SxxXyy marker.
  """
  # Fix: raw string for the pattern -- sequences like '\.' in a plain string
  # literal are invalid escapes (SyntaxWarning on modern Python).
  # Groups: 1=series name, 2=season digits, 3=episode digits, 4=version tail.
  p = re.compile(r'(.*)[\.\-\ ][Ss]([0-9]+)[\.\-]?[EeXx]([0-9]+)(.*)')
  mc = p.search(shortFileName)

  if not mc:
    return None

  serName     = mc.group(1)
  serSeason   = mc.group(2)
  serEpisode  = mc.group(3)
  fileVersion = mc.group(4).strip()   # e.g. "HDTV.XviD-LOL"; may be empty
  return SeriesFileDescr(shortFileName, serName, serSeason, serEpisode, subtVersion=fileVersion)
#END extractSeasonEpisode


################################
# return
################################
def prepareSeriesName(seriesName):
  """Normalize a series name into a subscenter.org URL slug: separator
  characters (whitespace, '+', '?', '.') become '-', the result is
  lower-cased and passed through specialSeriesMapping for site-specific
  renames (e.g. "house" -> "house-md")."""
  # Fix: raw string for the regex -- '\s' in a plain string literal is an
  # invalid escape (SyntaxWarning on modern Python).  Behavior unchanged:
  # first pass maps separators to spaces so strip() trims the edges, the
  # second pass turns the remaining separators into dashes.
  p = re.compile(r'([\s+?]|\.)')
  normalized = p.sub(' ', seriesName).strip()
  normalized = p.sub('-', normalized)

  return specialSeriesMapping(normalized.lower())
################################


################################
# return
################################
def specialSeriesMapping(seriesName):
  """Map series names whose subscenter.org slug differs from the obvious
  one; names without an override pass through unchanged."""
  overrides = {"house": "house-md"}
  return overrides.get(seriesName, seriesName)
################################


def main():
  """Entry point: collect video files under DOWNLOAD_DIR that have no
  subtitle yet, and try to download a Hebrew subtitle for each."""
  init_logging()

  # Fix: dropped the stack of leftover debug re-assignments of DOWNLOAD_DIR
  # (only the last one ever took effect); the effective value is kept.
  DOWNLOAD_DIR = "D:\\Temp\\vidPython"

  my_logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
  my_logger.info(">>>>>> Starting (%s)" % DOWNLOAD_DIR)
  my_logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")

  # Gather candidate videos, then drop those that already have a subtitle
  # twin next to them.
  collected = collect_video_files(DOWNLOAD_DIR, (".avi", ".mkv"))
  filesList = exclude_files(collected, (".heb.srt", ".he.srt", ".srt",))

  # Renamed loop variable: 'file' shadowed the builtin name.
  for videoFile in filesList:
    download_subtitle(videoFile)

  my_logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
  my_logger.info(">>>>>> Ending")
  my_logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
#END main
            
# Run the downloader only when executed as a script, not on import.
if __name__ == "__main__":
    main()

