#!/usr/bin/env python3
import re
import sys
import urllib
import urllib.parse

import requests
from bs4 import BeautifulSoup as BS
from lxml import etree
# from bs4 import BeautifouSoup as BS

# import sqlites3
# ANSI escape templates for terminal colors; use as `BLUE % "text"`.
BLUE = '\033[0;34m%s\033[0m'  # blue foreground
YELLOW =  '\033[0;33m%s\033[0m'  # yellow foreground
class DB:
    """In-memory shared state for the crawler.

    Class-level (shared) containers: every instance and helper in this
    module reads and writes the same two collections.
    """

    # Every URL discovered so far (absolute or relative).
    links = set()
    # Project display label -> relative href, filled by display()/get_project_name().
    projects = {}

    @staticmethod
    def search_link_keyword(key):
        """Yield each stored link that contains the substring *key*."""
        for link in DB.links:
            # `in` is the idiomatic substring test (was: link.find(key) != -1).
            if key in link:
                yield link

class ContentParser:
    """Extracts project names and links from HTML.

    Primary path uses lxml XPath; a BeautifulSoup CSS-selector fallback
    (``bs_load``) is used when lxml parsing blows up.
    """

    # CSS selector for the BeautifulSoup fallback path.
    BS_BAK = "h3.repo-list-name > a "

    def __init__(self, html):
        self.e = etree.HTML(html)

    def load(self, html):
        """Re-parse *html*, replacing the current lxml tree."""
        self.e = etree.HTML(html)

    def bs_load(self, html):
        """Yield ``(index, anchor_tag)`` pairs via the BeautifulSoup selector."""
        soup = BS(html, 'lxml')
        for i, link in enumerate(soup.select(ContentParser.BS_BAK)):
            yield i, link

    def extract(self, key):
        """Yield every element of the current tree matching XPath *key*."""
        yield from self.e.xpath(key)

    def get_project_name(self, html):
        """Print every internal link found in *html* and record it in DB.projects.

        Tries the XPath path first; on any parse failure retries with the
        BeautifulSoup fallback selector.
        """
        try:
            self.load(html)
            for i, v in enumerate(self.extract("//a[@href!='/'][@href!=''][@href!='javascript:;']")):
                # was `v.text == None` — identity check is the correct idiom
                if not hasattr(v, "text") or v.text is None:
                    continue
                text = v.text.strip().replace('\n', '').replace('\r', '')
                link = v.attrib['href']
                # Absolute URLs point off-site; only relative paths are projects.
                if link.startswith("http"):
                    continue
                print(i, text, link)
                DB.projects[text] = link
        except Exception:
            # lxml could not cope with the markup; retry via BeautifulSoup.
            for i, v in self.bs_load(html):
                text = v.text.strip().replace('\n', '').replace('\r', '')
                link = v.attrs['href']
                if link.startswith("http"):
                    continue
                print(i, text, link)
                DB.projects[text] = link



class WEB:
    """Requests-backed page fetcher that extracts and prints internal links.

    NOTE(review): ``display``/``info`` read ``self.parser``, which this class
    never assigns — a subclass (see ``Git``) is expected to provide it.
    """

    # Link-text fragments to skip by partial match (currently unused).
    OUT_PART_TEXT = {
        "Skip to",
        "Skip up",
    }

    # Exact link texts that are site chrome, not project links.
    OUT_TEXT = {
        "Skip to",
        "Skip up",
        "Sign in",
        "Personal",
        "Open source",
        "Blog",
        "Pricing",
        "Explore",
        "Business",
    }

    # Hrefs that are never interesting (currently unused).
    OUT_LINK = {
        "#start-of-content",
        "/join?source=header",
    }

    def __init__(self, url, search_url='', **init_args):
        """Open a session against *url* and fetch the landing page.

        *search_url* may be absolute or relative to *url*; extra keyword
        arguments are forwarded to the initial ``g()`` fetch.
        """
        self.main_url = url
        DB.links.add(url)
        self.session = requests.Session()
        self.session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
        self.search_url = None
        if search_url:
            self.search_url = search_url if search_url.startswith("http") else urllib.parse.urljoin(self.main_url, search_url)
        self.main_html = self.g(url, **init_args)

    def g(self, url, m='GET', data=None):
        """Fetch *url*: decoded text for GET, parsed JSON for POST, else None."""
        if m == 'GET':
            res = self.session.get(url)
            # res.encoding is None when the server omits a charset; fall back
            # to UTF-8 instead of crashing inside decode().
            return res.content.decode(res.encoding or 'utf-8', "ignore")
        if m == 'POST':
            # Bug fix: Response.content is bytes and has no .json();
            # parse JSON from the Response object itself.
            return self.session.post(url, data=data).json()
        return None

    def _tr(self, dic):
        """Serialize *dic* into a ``k=v&k=v`` query string (no URL-escaping)."""
        return '&'.join('='.join([key, str(value)]) for key, value in dic.items())

    def _get_tag(self, html):
        """Return the set of short (<10 chars) non-closing tag bodies in *html*."""
        return {tag for tag in re.findall(r'(?:\<)(.+?)(?:\>)', html)
                if len(tag) < 10 and not tag.startswith('/')}

    def display(self, html):
        """Print each unique internal link in *html* and store it in DB.projects."""
        try:
            self.parser.load(html)
            seen = set()
            for i, v in enumerate(self.parser.extract("//a[@href!='/'][@href!=''][@href!='javascript:;']")):
                # was `v.text == None` — identity check is the correct idiom
                if not hasattr(v, "text") or v.text is None:
                    continue
                text = v.text.strip().replace('\n', '').replace('\r', '')
                if text in seen or text in WEB.OUT_TEXT or text == '':
                    continue
                link = v.attrib['href']
                # Absolute URLs point off-site; only relative paths are projects.
                if link.startswith("http"):
                    continue
                print(i, text, BLUE % (urllib.parse.urljoin(self.main_url, link)))
                DB.projects[text] = link
                seen.add(text)
        except Exception:
            # lxml path failed; retry with the BeautifulSoup selector.
            seen = set()
            for i, v in self.parser.bs_load(html):
                text = v.text.strip().replace('\n', '').replace('\r', '')
                link = v.attrs['href']
                if link.startswith("http"):
                    continue
                print(i, text, BLUE % (urllib.parse.urljoin(self.main_url, link)))
                DB.projects[text] = link
                seen.add(text)

    def into(self, url):
        """Fetch *url* resolved against the main site URL."""
        return self.g(urllib.parse.urljoin(self.main_url, url))

    def info(self, html):
        """Print the text of every child of the first <article> (e.g. a README)."""
        self.parser.load(html)
        readme = list(self.parser.extract("//article"))[0]
        for child in readme.getchildren():
            print(YELLOW % child.text)


class Git(WEB):
    """GitHub-flavoured WEB client: discovers the site's search endpoint and
    drives an interactive repository-search loop."""

    def __init__(self, url, search_key='q', search_url=''):
        super(Git, self).__init__(url, search_url=search_url)

        self.search_k = search_key
        self.html = None
        self.parser = ContentParser(self.main_html)
        # Without an explicit search URL, discover one from the landing page.
        if not self.search_url:
            self.pre_parse()

    def pre_parse(self):
        """Collect every landing-page link; keep the first containing 'search'."""
        # Plain loop instead of a side-effect list comprehension.
        for node in self.parser.extract("//a[@href!='/'][@href!='javascript:;']"):
            DB.links.add(node.attrib['href'])
        self.search_url = list(DB.search_link_keyword("search"))[0]

    def search(self, pro, page="1"):
        """Search for *pro*, print results, then interactively page or inspect.

        Recurses with the user's input as the next page number; 'q' exits the
        process, a known project label prints that project's info page.
        """
        query = {
            self.search_k: pro,
            "p": page,
        }
        re_url = self.search_url + "?" + self._tr(query)
        print(YELLOW % re_url)
        # Reuse the URL built above instead of rebuilding the identical string.
        self.display(self.g(re_url))
        try:
            next_page_if = input("page [q exit, label to show info] >> ")
            print(DB.projects)
            if next_page_if in DB.projects:
                self.info(self.into(DB.projects[next_page_if]))
            print(next_page_if)
            print(DB.projects.keys())
            if next_page_if == 'q':
                # SystemExit is a BaseException, so the except below won't trap it.
                sys.exit(0)
            else:
                self.search(pro, page=next_page_if.strip())
        except Exception as e:
            # Best-effort: one bad page/input shouldn't kill the session.
            print(e)

if __name__ == "__main__":
    g = Git(sys.argv[1])
    project = None
    while project != 'exit':
        project = input(" >> project name\n>")
        g.search(project)
