#!/usr/bin/python3
from sys import argv,stdout,exit
from os import makedirs,path
from threading import Thread,Lock,active_count
from http.client import HTTPConnection,HTTPSConnection
import re

class Leech:
  """Scrape one imageboard thread: parse its URL, fetch the page, save the
  thread text and download every linked picture into a local repo directory.

  Typical call order (see LeechKeeper.runThemAll):
    info() -> getPage() -> getPicList() -> createRepo() -> getThreadText() -> getPics()
  """
  # Browser-like User-Agent so the board serves the page normally.
  # Shared, read-only default; getPic() copies it before adding Referer.
  headers={"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.151 Safari/534.16"}
  cPicList=0  # kept for backward compatibility; not used anywhere in this file

  def __init__(self,url):
    # All mutable/parsed state is per-instance (the original kept these as
    # shared class attributes).
    self.url=url
    self.protocol="http"   # flipped to "https" by info() for https:// urls
    self.site=""           # host part, e.g. "boards.4chan.org"
    self.chan=""           # channel name, e.g. "4chan"
    self.page=""           # path part requested from the server
    self.resp=""           # last http.client response object
    self.pageContent=""    # raw body of the thread page
    self.cat=""            # board/category, e.g. "b"
    self.thread=""         # numeric thread id
    self.fromto=""         # post range (alternate read.php-style urls only)
    self.repo=""           # local directory, set by createRepo()
    self.pics=[]           # absolute pic urls found by getPicList()

  def info(self):
    """Detect the protocol and parse site/chan/cat/thread out of self.url.

    A bare numeric argument is treated as a 4chan /b/ thread id.
    Exits the process if the url has an unknown scheme.
    """
    if self.url.startswith("https://"):
      print(">>:: Encrypted",end="")
      self.protocol="https"
    elif self.url.startswith("http://"):
      print(">>:: Un-encrypted",end="")
    elif "://" not in self.url:
      print(">>:: Possibly Un-encrypted",end="")
    else:
      print(">>:: Hmmm, can't determine protocol. Stopping.",end="")
      exit(1)

    # Raw strings: the originals relied on "\d" surviving inside normal
    # string literals, which is a SyntaxWarning on modern Python.
    bare_id=re.compile(r"^\d+$")
    rx=     re.compile(r"(https?://)?((.*\.)?(.*)\.(org|com|net))(/(.*)/res/(\d*)(\.html?)?)")
    rxs=    re.compile(r"(https?://)?((.*\.)?(.*)\.(org|com|net))(/read.php\?b=(\w+)\&t=(\d+)\&p=(.*))")
    if bare_id.search(self.url):
      self.url="http://boards.4chan.org/b/res/"+self.url
    match=rx.search(self.url)
    if match:
      (self.site,self.chan,self.page,self.cat,self.thread)=(match.group(2),match.group(4),match.group(6),match.group(7),match.group(8))
      print(" Channel:",self.chan," - Category:",self.cat," - Thread:",self.thread,"<"+self.url+">")
    else:
      match=rxs.search(self.url)
      if match:
        (self.site,self.chan,self.page,self.cat,self.thread,self.fromto)=(match.group(2),match.group(4),match.group(6),match.group(7),match.group(8),match.group(9))
        print(" Channel:",self.chan," - Category:",self.cat," - Thread:",self.thread," - Range:",self.fromto,"<"+self.url+">")
      else:
        print(" Sorry, can't parse url:",self.url)

  def connect(self,site):
    """Return a new HTTP(S) connection to site, matching self.protocol."""
    if self.protocol=="http":
      return HTTPConnection(site)
    elif self.protocol=="https":
      return HTTPSConnection(site)

  def getPage(self):
    """Fetch the thread page into self.pageContent; return the HTTP status."""
    c=self.connect(self.site)
    c.request("GET",self.page,headers=self.headers)
    self.resp=c.getresponse()
    if self.resp.status!=200:
      print("  :: Sorry, page not loaded:","<"+str(self.resp.status),self.resp.reason+">")
    else:
      self.pageContent=self.resp.read()
    return self.resp.status

  def getThreadText(self):
    """Dump every <blockquote> body, lightly de-HTMLed, to repo/_thread.txt.

    Requires createRepo() to have set self.repo first.
    """
    rx=re.compile("(?<=<blockquote>).*?(?=</blockquote>)")
    # Applied in order: strip font/anchor tags, normalise line breaks,
    # collapse blank lines, unescape the quote marker.
    cleanups=(
      ("<font.*?>",""),
      ("</font>",""),
      ("<a href.*?>",""),
      ("</a>",""),
      ("<br />","\n"),
      ("\r\n","\n"),
      ("\n\n","\n"),
      ("&gt;",">"),
    )
    matches=rx.findall(str(self.pageContent))
    with open(self.repo+"/_thread.txt","w") as fh:
      for raw in matches:
        txt=raw
        for pattern,repl in cleanups:
          txt=re.sub(pattern,repl,txt)
        fh.write("-------------\n")
        fh.write(str(txt)+"\n")

  def getPicList(self):
    """Collect unique full-size picture urls into self.pics; return the count."""
    rx=re.compile(r"(https?://.{10,20}\.(org|com|net)/.{1,5}/src/\d+\.(png|jpg|jpeg|gif))")
    # findall returns (full, tld, ext) tuples; keep the full url, de-duplicated.
    matches=[s[0] for s in rx.findall(str(self.pageContent))]
    if matches:
      self.pics=list(set(matches))
    return len(self.pics)

  def listPics(self):
    """Print every collected picture url, one per line."""
    for pic in self.pics:
      print(pic)

  def createRepo(self):
    """Set self.repo to .chan/<chan>/<cat>/<thread> and create it if missing."""
    self.repo=".chan/"+self.chan+"/"+self.cat+"/"+self.thread
    if path.exists(self.repo):
      print("  :: ... Repo:",self.repo,str(len(self.pics)),"pics. :: ",end="")
    else:
      print("  :: NEW Repo:",self.repo,str(len(self.pics)),"pics. :: ",end="")
      try:
        makedirs(self.repo)
      except OSError:
        print("Could not create directory, check permissions:",self.repo)

  def getPic(self,idx):
    """Download self.pics[idx] into the repo.

    Returns True on success or when the file already exists, False on any
    failure. Runs on worker threads (see getPics), so it must not mutate
    shared state.
    """
    url=self.pics[idx]
    rx=re.compile(r"(https?://)?((.*\.)?(.*)\.(org|com|net))((/.*/)(\d+\.(png|jpg|jpeg|gif)))")
    match=rx.search(url)
    if not match:
      print("Can't parse pic url:",url)
      return False
    (site,subUrl,pic)=(match.group(2),match.group(6),match.group(8))
    if path.exists(self.repo+"/"+pic):
      # Already downloaded on a previous run.
      return True
    c=self.connect(site)
    # Copy before adding Referer: the original mutated the class-shared
    # headers dict from concurrent threads.
    headers=dict(self.headers)
    headers["Referer"]=self.url
    c.request("GET",subUrl,headers=headers)
    r=c.getresponse()
    if r.status!=200:
      print("  :: Error getting pic",idx," - ",r.status, r.reason)
      return False
    print("N",sep="",end="")
    stdout.flush()
    try:
      data=r.read()
      if len(data)!=0:
        with open(self.repo+"/"+pic,"wb") as fh:
          fh.write(data)
      return True
    except OSError:
      return False

  def getPics(self):
    """Download all pics concurrently, at most 7 downloads in flight."""
    tlist=[]
    stdout.flush()
    for idx in range(len(self.pics)):
      t=Thread(target=self.getPic,args=[idx])
      tlist.append(t)
      t.start()
      if len(tlist)>=7:
        # Bound concurrency by blocking on the oldest of the last 7 threads
        # (the original busy-waited on active_count(), pinning a CPU core).
        tlist[-7].join()
    for t in tlist:
      t.join()
    print("")
    return True

class LeechKeeper():
  """Maintain the watch list of thread URLs and run a Leech over each one."""

  def __init__(self):
    # Per-instance lists. The originals were class-level mutable attributes,
    # silently shared by every LeechKeeper instance.
    self.urls=[]
    self.leeches=[]

  def getFromFile(self):
    """Load urls from the .chan/.watch file into self.urls."""
    print("Getting .watch list...")
    with open(".chan/.watch","r") as fh:
      for line in fh:
        self.urls.append(line.strip("\n"))

  def getFromArgs(self,args):
    """Append every command-line url in args to self.urls."""
    print("Getting Args...")
    for a in args:
      self.urls.append(a)

  def saveToFile(self):
    """Persist self.urls to .chan/.watch, one url per line."""
    with open(".chan/.watch","w+") as fh:
      for u in self.urls:
        fh.write(u+"\n")

  def runThemAll(self):
    """Leech every watched url; drop 404'd (dead) threads from the list."""
    print("Running leeches...")
    # Iterate over a copy: dead threads are removed from self.urls mid-loop,
    # and the original mutated the list it was iterating, skipping entries.
    for u in list(self.urls):
      l=Leech(u)
      l.info()
      r=l.getPage()
      if r==404:
        self.urls.remove(u)
        self.saveToFile()
      else:
        l.getPicList()
        l.createRepo()
        l.getThreadText()
        l.getPics()

### Use the class!!!
# Script entry point.  Usage:  leech.py [u] [url-or-id ...]
#   "u"        -> also process the saved watch list (.chan/.watch)
#   url / id   -> thread urls; a bare numeric id is expanded by Leech.info()
lk=LeechKeeper()
myargs=argv[1:]

# "u" flag alone: load previously watched urls from file.
if "u" in myargs:
  myargs.remove("u")
  lk.getFromFile()

if len(myargs)!=0:
  # No "u" flag given: still merge the existing watch list before saving,
  # so .chan/.watch keeps its old entries plus the new args.
  if "u" not in argv:
    lk.getFromFile()
  lk.getFromArgs(myargs)
  lk.saveToFile()
  # NOTE(review): this replaces the merged list, so only the command-line
  # args are leeched on this run even though the full list was just saved.
  # Looks intentional (file entries run only with "u") — confirm.
  lk.urls=myargs

lk.runThemAll()
exit()
