#!/usr/bin/env python
# encoding: utf-8

# Switch the default str encoding from ascii to utf8 (or gb18030) — Python 2 hack.
import sys

reload(sys)
sys.setdefaultencoding('utf8')

import os
import os.path
import shutil
import time, datetime
from hashlib import md5
from bs4 import BeautifulSoup
from jsmin import jsmin
from csscompressor import compress

import git_test

# Walk the given root directories and return every file found beneath them.
def walk(roots):
    """Recursively collect every file under the given root directories.

    ``os.walk`` already descends into subdirectories on its own, so one
    pass per root is sufficient.  The original version additionally
    called ``walk`` on every subdirectory, which re-walked each subtree
    once per ancestor and returned every file many times over — the fix
    is simply to drop that redundant manual recursion.

    :param roots: iterable of directory paths to scan
    :return: list of absolute paths of all files found
    """
    files = []
    for root in roots:
        for dirpath, _dirnames, filenames in os.walk(root):
            for filename in filenames:
                files.append(os.path.join(dirpath, filename))
    return [os.path.abspath(f) for f in files]


def copyFiles(sourceDir, targetDir):
    """Recursively copy every file from sourceDir into targetDir.

    A file is copied only when the target is missing or its size differs
    from the source (cheap change detection).  Any directory whose path
    contains ".svn" is skipped entirely.  Target directories are created
    lazily, just before the first file is written into them.
    """
    # NOTE: the original tested find(".svn") > 0, which failed to skip a
    # path that *starts* with ".svn"; >= 0 skips it in every position.
    if sourceDir.find(".svn") >= 0:
        return
    for name in os.listdir(sourceDir):
        sourceFile = os.path.join(sourceDir, name)
        targetFile = os.path.join(targetDir, name)
        # parenthesized print behaves identically under Python 2 for a
        # single argument and keeps the statement valid under Python 3
        print(u"COPY：" + sourceFile + ' => ' + targetFile)
        if os.path.isfile(sourceFile):
            if not os.path.exists(targetDir):
                os.makedirs(targetDir)
            # copy when the target is absent or its size differs
            if (not os.path.exists(targetFile)
                    or os.path.getsize(targetFile) != os.path.getsize(sourceFile)):
                # with-blocks close both handles even if the copy fails,
                # unlike the original paired open(...).read/write calls
                with open(sourceFile, "rb") as src, open(targetFile, "wb") as dst:
                    dst.write(src.read())
        if os.path.isdir(sourceFile):
            copyFiles(sourceFile, targetFile)


def pack():
    """Build the deployable site in ./deploy from the sources in ./www.

    Steps: wipe and recreate the deploy tree, copy static assets
    (img/, lib/), inline every HTML template from www/tpl into
    index.html as <script type="text/ng-template"> tags, concatenate
    library scripts and minify application scripts into md5-fingerprinted
    bundle files, minify stylesheets the same way, rewrite the tags in
    index.html to point at the bundles, and finally call
    git_test.startTag() (presumably tags the release in git — defined in
    the sibling git_test module; TODO confirm).

    Runs relative to the current working directory; expects a ./www tree
    containing index.html, tpl/, img/, lib/, js/ and css/.
    """
    # CDN prefix prepended to rewritten asset URLs; empty = same-origin.
    # cdn = "https://dn-kaniu-fastmode-cdn.qbox.me/";
    cdn = '';
    # current working directory is treated as the project root
    root = os.getcwd()

    # source tree: ./www
    src_path = os.path.abspath(os.path.join(root, "www"))
    print u"原码目录：" + src_path

    # output tree: ./deploy (wiped and rebuilt below)
    deploy_path = os.path.abspath(os.path.join(root, "deploy"))
    print u"发布目录：" + deploy_path

    img_deploy_path = os.path.join(deploy_path, "img")
    fonts_deploy_path = os.path.join(deploy_path, "fonts")
    lib_deploy_path = os.path.join(deploy_path, "lib")
    css_deploy_path = os.path.join(deploy_path, "css")
    js_deploy_path = os.path.join(deploy_path, "js")

    # directory holding the HTML templates
    templates_path = os.path.join(src_path, "tpl")
    # every template file found under tpl/ (recursive)
    source_tpls = walk([templates_path])

    main_file_path = os.path.join(src_path, 'index.html')
    main_output_file_path = os.path.join(deploy_path, 'index.html');

    # start from a clean deploy directory every run
    if (os.path.exists(deploy_path)):
        shutil.rmtree(deploy_path);
    os.makedirs(deploy_path);
    os.makedirs(img_deploy_path);
    os.makedirs(fonts_deploy_path);
    os.makedirs(js_deploy_path);
    os.makedirs(css_deploy_path);

    # copy image assets
    print u"\n复制图片文件"
    copyFiles(src_path + "/img", img_deploy_path);
    # font copying is currently disabled (fonts dir is still created above)
    # print u"\n复制字体文件"
    # copyFiles(src_path + "/fonts", fonts_deploy_path);
    # copy third-party library assets
    print u"\n复制库文件"
    copyFiles(src_path + "/lib", lib_deploy_path);

    # open(deploy_path + "/redirect.html", "wb").write(open(src_path + "/redirect.html", "rb").read())

    # load the entry page (index.html)
    main_file = open(main_file_path)
    # contents of the entry page, as a list of lines
    main_file_content = main_file.readlines()
    main_file.close()

    print u"开始获取HTML模板文件"
    # each entry is one template wrapped in a text/ng-template script tag
    tpls = []
    for tpl in source_tpls:
        print u"处理模板文件：" + tpl
        f = open(tpl, "r")
        fls = f.readlines()
        f.close()

        # collapse the template onto one line before parsing
        fls = [l.strip() for l in fls]
        fs = u" ".join(fls)
        temp_fls = BeautifulSoup(fs, "html.parser")
        print u"获取模板中img标签"
        img = temp_fls.findAll("img")

        # rewrite local image src attributes to absolute/CDN paths;
        # http(s) URLs are left untouched
        for temp_img in img:
            if (temp_img.has_attr('src') and not temp_img['src'].startswith('http://') and not temp_img[
                'src'].startswith(
                'https://')):
                src = temp_img['src']
                src = src.replace('../img/', "/img/")
                temp_img['src'] = cdn + src

        # serialize back to markup (str works here because the module
        # sets the default encoding to utf8 at import time)
        fs = str(temp_fls)

        # template id = path relative to the source root, e.g. "tpl/foo.html"
        tpl_id = tpl.replace(src_path + "/", u"")

        t = '<script type="text/ng-template" id="%s">%s</script>' % (tpl_id, fs)

        tpls.append(t)

    # merge all templates into the entry page at the placeholder comment
    print u"整合主入口文件与模板文件"
    main_file_content = u" ".join(main_file_content).replace("<!-- RELEASE TEMPLATES HERE -->", u"".join(tpls), 1)

    print u"开始压缩JS与CSS"

    # parse the merged entry page so tags can be rewritten in place
    print u"解析主模板文件"
    soup = BeautifulSoup(main_file_content, "html.parser")

    print u"获取所有的 scripts 外部引用文件"
    scripts = soup.find("body").find_all("script")
    print u"压缩融合所有 scripts 文件"

    # bundle the JavaScript: lib/* scripts are concatenated as-is,
    # application scripts are run through jsmin first
    lib_scripts = []
    app_scripts = []
    print u"开始压缩JS"
    print scripts
    for script in scripts:
        # only process <script> tags with a local (non-http/https) src
        if (script.has_attr('src') and not script['src'].startswith('http://') and not script['src'].startswith(
                'https://')):
            src = script['src']
            print u"压缩：" + src
            script_file = open(os.path.join(src_path, src), "r")
            script_file_content = script_file.read()
            script_file.close()

            if src.startswith("lib"):
                lib_scripts.append(script_file_content)
            else:
                minified = jsmin(script_file_content)
                app_scripts.append(minified)

            # remove this <script> tag from the document; the bundle
            # tags appended below replace all of them
            script.extract()

    # fingerprint each bundle with the md5 of its content (cache busting)
    lib_script_content = "\n\n".join(lib_scripts);
    app_script_content = "\n\n".join(app_scripts);
    lib_scripts_md5 = md5(lib_script_content).hexdigest();
    app_scripts_md5 = md5(app_script_content).hexdigest();
    lib_script_deploy_suffix = 'js/lib.min.' + lib_scripts_md5 + '.js';
    app_script_deploy_suffix = 'js/app.min.' + app_scripts_md5 + '.js';
    lib_script_output_file_path = os.path.join(deploy_path, lib_script_deploy_suffix);
    app_script_output_file_path = os.path.join(deploy_path, app_script_deploy_suffix);
    lib_script_output_file = open(lib_script_output_file_path, "w")
    lib_script_output_file.write(lib_script_content);
    lib_script_output_file.close()

    app_script_output_file = open(app_script_output_file_path, "w")
    app_script_output_file.write(app_script_content)
    app_script_output_file.close()

    # new <script> tags pointing at the fingerprinted bundles
    lib_script_tag = soup.new_tag("script", src=cdn + lib_script_deploy_suffix)
    app_script_tag = soup.new_tag("script", src=cdn + app_script_deploy_suffix)

    print u"写入新标签至index.html 中"
    soup.body.append(lib_script_tag)
    soup.body.append(app_script_tag)

    # bundle the stylesheets the same way, via csscompressor
    print u"获取所有的 stylesheets 外部引用文件"
    stylesheets = soup.find("head").find_all("link", {"rel": "stylesheet"})
    stylesheet_contents = []

    for stylesheet in stylesheets:
        # only process <link> tags that actually carry an href
        if (stylesheet.has_attr('href')):
            href = stylesheet['href'];
            print u"压缩：" + href

            # stylesheets under lib/ are left referenced in place
            if (not href.startswith('lib')):
                stylesheet_file = open(os.path.join(src_path, href), "r")
                stylesheet_file_content = stylesheet_file.read()

                stylesheet_file.close()
                stylesheet_contents.append(compress(stylesheet_file_content))

                # remove this stylesheet <link> from the document
                stylesheet.extract()

    app_stylesheet_content = "\n\n".join(stylesheet_contents);
    app_stylesheet_md5 = md5(app_stylesheet_content).hexdigest();
    app_stylesheet_deploy_suffix = 'css/app.min.' + app_stylesheet_md5 + '.css';
    stylesheet_output_file_path = os.path.join(deploy_path, app_stylesheet_deploy_suffix);
    stylesheet_output_file = open(stylesheet_output_file_path, "w")
    stylesheet_output_file.write(app_stylesheet_content)
    stylesheet_output_file.close()

    # single <link> tag for the fingerprinted CSS bundle
    style_tag = soup.new_tag("link", href=cdn + app_stylesheet_deploy_suffix, rel="stylesheet")
    print u"写入新标签至index.html 中"
    soup.head.append(style_tag)

    # write the assembled page into the deploy directory
    deploy_html_file = open(main_output_file_path, "w")
    deploy_html_file.write(str(soup))
    deploy_html_file.close()
    git_test.startTag()


def main():
    """Entry point: build the deploy directory from ./www."""
    # The original function ended with a bare name `start`, which raised
    # a NameError immediately after pack() completed — leftover debris,
    # removed here.
    pack()

main()
