#!/usr/bin/env python
# coding=utf-8

import re
from sqlalchemy import Column, String, create_engine, Integer, or_, text, exists
from sqlalchemy.orm import sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.expression import func
import os.path as path
from urlparse import urlparse
import re
import requests

# Declarative base shared by every ORM model in this module.
Base = declarative_base()
# NOTE(review): DB host and credentials are hard-coded — consider moving to config.
engine = create_engine('mysql+mysqlconnector://patchdetect:patchdetect@10.141.209.138:6603/patchdetect', echo=False)
# Single module-level session shared by all helpers below.
DBSession = sessionmaker(bind=engine)
session = DBSession()
class BS(Base):
    """ORM model: maps a Debian binary package name to its source package name."""
    __tablename__ = "binary-source"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Binary package name.
    binary = Column(String(100))
    # Source package the binary is built from.
    source = Column(String(100))

    def __repr__(self):
        # NOTE(review): label says "Package" in every model's repr — looks like
        # copy-paste, but it is a runtime string so it is left unchanged.
        return "<Package(id={}, binary=\"{}\", source=\"{}\")>".format(self.id, self.binary, self.source)

class HomePage(Base):
    """ORM model: homepage url recorded for a package."""
    __tablename__ = "homepage"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Package name.
    pkg = Column(String(100))
    # Homepage url of the package.
    homepage = Column(String(100))
class GitUrl(Base):
    """ORM model: resolved git clone url for a package (filled by extractGitCloneUrl)."""
    __tablename__ = 'git_url'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Git clone url (e.g. git@github.com:owner/repo.git).
    url = Column(String(256))
    # Package the clone url belongs to.
    pkg = Column(String(100))

    def __repr__(self):
        return "<Package(id={}, url=\"{}\", pkg=\"{}\")>".format(self.id, self.url, self.pkg)
class Url(Base):
    """ORM model: raw crawled url for a package (filled by insertUrlDataIntoDb)."""
    __tablename__ = 'url'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Crawled url containing "git".
    url = Column(String(256))
    # Package the url was crawled for.
    pkg = Column(String(100))
    # Network location (urlparse(url).netloc) of the url, stored at insert time.
    tld = Column(String(256))

    def __repr__(self):
        return "<Package(id={}, url=\"{}\", pkg=\"{}\")>".format(self.id, self.url, self.pkg)
class Package(Base):
    """ORM model: bare package name table."""
    __tablename__ = 'packages'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Package name.
    name = Column(String(100))

    def __repr__(self):
        return "<Package(id={}, name=\"{}\")>".format(self.id, self.name)

def init_db():
    """Create every table declared on Base (no-op for tables that exist)."""
    Base.metadata.create_all(engine)

init_db()

def insertUrlDataIntoDb():
    """Bulk-insert crawled git-looking urls into the `url` table.

    For every distinct source package in `binary-source`, reads the crawled
    url list from output/DebianCrawler/<package> (one url per line), keeps
    lines containing "git", and inserts them with their netloc.  Packages
    already present in the `url` table are skipped, so the function is
    safe to re-run after an interruption.
    """
    CHUNK_SIZE = 50000
    # Set (not list) so the per-package membership test below is O(1).
    inserted_pkgs = set(x[0] for x in session.query(Url.pkg).distinct(Url.pkg).all())
    for record in session.query(BS.source).distinct(BS.source):
        package = record[0]
        if package in inserted_pkgs:
            continue
        filename = "output/DebianCrawler/{}".format(package)

        if not path.isfile(filename):
            continue
        with open(filename) as f:
            # bulk_insert_mappings is much faster than per-object ORM inserts.
            urls = [dict(url=l.strip(), pkg=package, tld=urlparse(l.strip()).netloc) for l in f if "git" in l]

        if not urls:
            continue

        # Insert in fixed-size chunks so one huge statement cannot blow up;
        # step-range also avoids the empty trailing chunk the old // math produced.
        for start in range(0, len(urls), CHUNK_SIZE):
            try:
                session.bulk_insert_mappings(
                    Url,
                    urls[start:start + CHUNK_SIZE]
                )
                session.commit()
            except Exception:
                # Log and roll back, then keep going with the next chunk.
                import traceback
                traceback.print_exc()
                session.rollback()



def parseGitUrl(url, pkg, b_s_map):
    """Try to derive (repo_name, clone_url) for *pkg* from a crawled *url*.

    Only a fixed set of known git hosts is understood.

    Parameters:
        url: crawled homepage/VCS url for the package.
        pkg: binary package name the url belongs to.
        b_s_map: binary-name -> source-name mapping; consulted only for
            cgit.freedesktop.org urls (via comparePkg).

    Returns:
        A (repo_name, clone_url) tuple, or None when the url cannot be mapped.
    """
    def comparePkg(pkg, repo_name):
        # Heuristic match between the binary package name, the repository name
        # and the source package name: strip "lib" prefixes and language
        # stop-words, then compare the flattened lowercase names for equality
        # or prefix containment.
        def normalize(name, stop_words):
            parts = name.replace(".", "-").split("-")
            cleaned = []
            for part in parts:
                if part.startswith("lib"):
                    part = part[3:]
                if part in stop_words:
                    part = ""
                cleaned.append(part)
            return "".join(cleaned).lower()

        stops = ["haskell", "java", "python", "perl", "el", "js"]
        p1 = normalize(pkg, stops)
        p2 = normalize(repo_name, stops)
        # Source package names additionally drop the "ghc" component.
        p3 = normalize(b_s_map[pkg], stops + ["ghc"])
        return (p1 == p2 or p2 == p3 or p1.startswith(p2) or p3.startswith(p2)
                or p2.startswith(p1) or p2.startswith(p3))

    # Special case: the libgit2 homepage carries no usable path information.
    if url=="http://libgit2.github.com/":
        return ("libgit2", "git@github.com:libgit2/libgit2.git")
    tld = urlparse(url).netloc
    if tld=='repo.or.cz':
        regex_search_result = re.findall(r'[\d\w\-_]+\.git', url)
        if not regex_search_result:
            return None
        repo_name = regex_search_result[0][:-4]

        url_splited = url.strip('/').split("/")
        if 'w' in url_splited:
            # gitweb-style url: .../w/<repo>.git or .../w/<namespace>/<repo>.git
            if url_splited[url_splited.index("w")+1].startswith(repo_name+".git"):
                clone_url = "git://repo.or.cz/{}.git".format(repo_name)
            else:
                extend = url_splited[url_splited.index("w")+1]
                clone_url = "git://repo.or.cz/{}/{}.git".format(extend, repo_name)
        else:
            if url_splited[url_splited.index("repo.or.cz")+1].startswith(repo_name+".git"):
                clone_url = "git://repo.or.cz/{}.git".format(repo_name)
            else:
                extend = url_splited[url_splited.index("repo.or.cz")+1]
                clone_url = "git://repo.or.cz/{}/{}.git".format(extend, repo_name)
    elif tld=="cgit.freedesktop.org":
        if url.endswith("org") or url.endswith("org/"):
            # Bare host url, no repository path.
            return None

        # If the url points inside a cgit view (tree/log/...), the repo name
        # is the path component right before the view name.
        git_funcs = ["tree", "diff", "refs", "patch", "tag", "commit", "log"]
        url_splited = url.strip('/').split("/")

        for view in git_funcs:
            if view not in url_splited:
                continue
            index = url_splited.index(view)
            if index<0:
                continue
            repo_name = url_splited[index-1]
            target_url = '/'.join(url_splited[:index])
            break
        else:
            repo_name = url_splited[-1]
            target_url = url

        if not comparePkg(pkg, repo_name):
            return None

        # Fetch the repo page and scrape the advertised git:// clone url.
        # (Renamed from `text`, which shadowed sqlalchemy's `text` import.)
        try:
            page = requests.get(target_url).text
        except Exception:
            return None

        regex_search_result = re.findall(r'git://[\w/\.]*{}'.format(repo_name), page)
        if not regex_search_result:
            return None
        clone_url = regex_search_result[0]
    elif tld=="github.com":
        url_splited = url.split('/')
        if len(url_splited)<=4:
            # Example: https://github.com/0install — no repo name to extract.
            return None
        repo_name = url_splited[4]
        # TODO: this github url handling may still be imperfect.
        if url.endswith(".git"):
            # Example: http://github.com/0install/0publish.git
            repo_name = repo_name[:-4]
            clone_url = url
        else:
            # Pattern: git@github.com:rfinnie/2ping.git
            clone_url = "git@github.com:{}/{}.git".format(url_splited[3], url_splited[4])
        if not repo_name:
            return None
    elif tld=="git.ao2.it":
        url_splited = url.split('/')
        if len(url_splited)<=4:
            # NOTE(review): requires >4 components but only reads index 3 —
            # the trailing component is ignored; confirm against crawled data.
            return None
        repo_name = url_splited[3][:-4]
        clone_url = "git://git.ao2.it/{}.git".format(repo_name)
    elif tld=="cgit.osmocom.org":
        url_splited = url.split('/')
        if len(url_splited)<=4:
            return None
        repo_name = url_splited[3]
        clone_url = "git://git.osmocom.org/{}".format(repo_name)
    elif tld=="git.shadowcat.co.uk":
        if not url.startswith("git://"):
            return None
        # Example: git://git.shadowcat.co.uk/catagits/Catalyst-Model-DBIC-Schema.git
        url_splited = url.split('/')
        if len(url_splited)<5:
            return None
        repo_name = url_splited[4][:-4]
        clone_url = url
    else:
        # Unknown host.
        return None

    return repo_name, clone_url

def getCloneUrl(url, pkg, b_s_map):
    """Resolve *url* to a git clone url for *pkg*, or return None.

    Delegates url parsing to parseGitUrl() and then double-checks that the
    extracted repository name plausibly matches the package name.
    """
    def comparePkg(pkg, repo_name):
        # Same heuristic as in parseGitUrl: strip "lib" prefixes and language
        # stop-words, then compare the flattened lowercase names for equality
        # or prefix containment.  (The three hand-unrolled loops the original
        # carried were collapsed into this helper.)
        def normalize(name, stop_words):
            parts = name.replace(".", "-").split("-")
            cleaned = []
            for part in parts:
                if part.startswith("lib"):
                    part = part[3:]
                if part in stop_words:
                    part = ""
                cleaned.append(part)
            return "".join(cleaned).lower()

        stops = ["haskell", "java", "python", "perl", "el", "js"]
        p1 = normalize(pkg, stops)
        p2 = normalize(repo_name, stops)
        # Source package names additionally drop the "ghc" component.
        p3 = normalize(b_s_map[pkg], stops + ["ghc"])
        return (p1 == p2 or p2 == p3 or p1.startswith(p2) or p3.startswith(p2)
                or p2.startswith(p1) or p2.startswith(p3))

    result = parseGitUrl(url, pkg, b_s_map)
    if not result:
        return None
    repo_name, clone_url = result
    if not comparePkg(pkg, repo_name):
        return None
    return clone_url

def getRepoName(url):
    """Return the final path component of *url*, minus any trailing ``.git``."""
    tail = url.rsplit("/", 1)[-1]
    return tail[:-4] if tail.endswith(".git") else tail


def extractGitCloneUrl():
    """For each package with crawled urls, derive one git clone url and store
    it in the git_url table (first successful url wins)."""
    inserted_pkgs = set(row[0] for row in session.query(GitUrl.pkg).distinct(GitUrl.pkg).all())

    # Map every binary package — and every source package to itself — to its
    # source package name.
    # TODO: virtual packages are not handled.
    b_s_map = {}
    for binary_name, source_name in session.query(BS.binary, BS.source):
        b_s_map[binary_name] = source_name
    for row in session.query(BS.source).distinct(BS.source):
        b_s_map[row[0]] = row[0]

    # NOTE(review): resume hack — everything before the package "wmmon" in
    # query order is skipped.
    reached_resume_point = False
    for pkg_row in session.query(Url.pkg).distinct(Url.pkg):
        pkg = pkg_row[0]
        if not reached_resume_point and pkg != "wmmon":
            continue
        reached_resume_point = True
        if pkg in inserted_pkgs:
            continue
        # TODO: should a package with several clone urls be handled?
        # TODO: packages already searched without a hit could be skipped too.
        # TODO: repos that exist under a different name are not handled.
        for url_row in session.query(Url.url).filter(Url.pkg==pkg):
            clone_url = getCloneUrl(url_row[0], pkg, b_s_map)
            if clone_url:
                session.add(GitUrl(url=clone_url, pkg=pkg))
                session.commit()
                inserted_pkgs.add(pkg)
                break


def deleteWrongData():
    """Delete malformed rows such as ``git@github.com:rlabduke/.git`` —
    clone urls whose repository name is empty ("/.git")."""
    broken = session.query(GitUrl.url).filter(GitUrl.url.op('regexp')('/\.git'))
    broken.delete(synchronize_session=False)
    session.commit()

def deleteSingleUrlRecord():
    """Preview packages that have exactly one url row.

    Single-url records came from the old domain cache and the github-only
    filter; that logic is gone, so these rows must eventually be removed so
    re-insertion can proceed.  For now this only prints a sample.
    """
    singles = (session.query(Url)
               .group_by(Url.pkg)
               .having(func.count(Url.url) == 1)
               .all())
    print(singles[:10])

def SelectPackagesHavingNoGitUrlFound():
    """Print counts and a 20-package sample of source packages that have no
    git url recorded."""
    with_url = session.query(BS.source).filter(
        or_(GitUrl.pkg==BS.binary, GitUrl.pkg==BS.source)).distinct(BS.source).all()
    # TODO: is the query below right?  It only matches on the binary name.
    every = session.query(BS.source).filter(GitUrl.pkg==BS.binary).distinct(BS.source).all()
    print(len(with_url))
    print(len(every))
    found_names = set(row[0] for row in with_url)
    all_names = set(row[0] for row in every)
    for name in list(all_names - found_names)[:20]:
        print(name)

def getUnFoundPackages():
    """Write the source packages for which no git url was found to
    output/unfound_source_pkgs.

    The complement list output/found_source_pkgs was produced by an earlier
    run and is read back here rather than recomputed.
    """
    rows = session.query(BS.source).distinct(BS.source).all()
    all_pkgs = set(row[0] for row in rows)

    with open("output/found_source_pkgs") as f:
        found_source_pkgs = set(f.read().split("\n"))

    with open("output/unfound_source_pkgs", "w") as f:
        for name in list(all_pkgs - found_source_pkgs):
            f.write("%s\n" % name)

def getSourcePkg(pkg):
    """Map a package name (binary or source) to its source package name."""
    rows = session.query(BS.source).filter(or_(pkg==BS.binary, pkg==BS.source))
    return rows[0][0]
def getGitCloneUrl(pkg):
    """Return one stored git clone url for *pkg*, or None if none exists."""
    rows = session.query(GitUrl.url).filter(GitUrl.pkg==pkg).all()
    return rows[0][0] if rows else None

def getGitUrlData():
    """Return every (clone_url, package) pair stored in the git_url table."""
    return session.query(GitUrl.url, GitUrl.pkg).all()

# Script entry point: currently only regenerates the unfound-package list.
if __name__=="__main__":
    # deleteSingleUrlRecord()
    getUnFoundPackages()
