from .date import getDateForZheng
import re
import common.index as common
from simple_chalk import chalk


def getAllWords(filePath="wordsAll.txt"):
    """Read all non-blank lines from a word-list file.

    Args:
        filePath: path of the word file. Defaults to "wordsAll.txt" in the
            current working directory, preserving the previous hard-coded
            behavior.

    Returns:
        list[str]: lines (trailing newline kept) whose stripped content is
        non-empty.
    """
    with open(filePath, "r", encoding="utf8") as f:
        # Iterate the file directly instead of materializing readlines().
        return [line for line in f if line.strip()]


def toChunks(ls, size=7):
    """Split *ls* into consecutive chunks of at most *size* elements.

    The last chunk may be shorter than *size*. Fixes two defects of the
    previous version: an empty list now yields no chunks (it used to
    return ``[[]]`` via the early-return shortcut), and a non-positive
    *size* no longer causes an infinite loop (start never advanced).

    Args:
        ls: sequence to split.
        size: maximum chunk length (default 7).

    Returns:
        list: slices of *ls*, in order.
    """
    if size <= 0:
        # Cannot step by <= 0; fall back to a single chunk (or nothing).
        return [ls] if ls else []
    return [ls[i:i + size] for i in range(0, len(ls), size)]


def createWordFile(words, index, book, config):
    """Write one daily word file for *book* containing *words*.

    The words are split into two halves (via halfLen), and each half is
    rendered as three lines (Chinese line, English line, placeholder line)
    by getThreeLines.

    Args:
        words: list of raw word lines ("english 中文" format).
        index: position of this batch within the book. Kept for interface
            compatibility; the serial number is derived from the book's
            current maximum instead.
        book: book identifier used to derive the target file name.
        config: dict-like config providing "dailyDir" and related settings.

    Returns:
        True on success.
    """
    curMaxSn = common.getMaxSnOfBook(book, config)
    sn = curMaxSn + 1
    fileName = common.getFileName(book, sn)
    absFilePath = common.getAbsPath(
        "%s/%s" % (config.get("dailyDir"), fileName), config
    )

    # Split the day's words into (at most) two halves of three lines each.
    chunks = toChunks(words, halfLen(len(words)))

    lines = []
    # halfLen guarantees at most two chunks, but slice defensively: the old
    # `firstPart, secondPart = chunks` raised ValueError on any extra chunk.
    for part in chunks[:2]:
        if part:
            lines.extend(getThreeLines(part))

    with open(absFilePath, "w", encoding="utf8") as f:
        f.writelines(lines)

    print("写入文件成功: %s" % fileName)
    return True


def halfLen(num, isFloor=False):
    """Return half of *num* as an integer, rounded up by default.

    Args:
        num: integer to halve.
        isFloor: when True, round down instead of up.

    Returns:
        int: num / 2 rounded in the requested direction.
    """
    if isFloor:
        return num // 2
    # Ceiling division via negated floor division: -(-n // 2) == ceil(n / 2).
    return -(-num // 2)


def getThreeLines(words):
    """Render a batch of raw word lines into the three-line file format.

    Each input line looks like "painter 画家；油漆工" or "soap opera 肥皂剧":
    a leading English part followed by Chinese glosses. The output is:
      line 1 - Chinese glosses joined by spaces (inner spaces / '；' → ';')
      line 2 - English terms joined by spaces (inner spaces → '_')
      line 3 - a fixed "---" placeholder line

    Words whose English part cannot be found are reported and skipped.

    Args:
        words: list of raw word lines.

    Returns:
        list[str]: exactly three newline-terminated lines.
    """
    # Compile patterns once, outside the loop (they were rebuilt per word).
    enRe = re.compile(r"^[\-a-zA-Z\s\.']*")
    spaceRe = re.compile(r"\s+")
    spaceSemicolonRe = re.compile(r"\s+|；")
    moreSemicolonRe = re.compile(r";{2,}")

    cnWords = []
    enWords = []
    ybWords = ["-"] * 3

    for word in words:
        enMatchObj = enRe.match(word)
        enMatched = enMatchObj.group()
        if not enMatched:
            print("error: find english word fail", word)
            continue
        # Spaces inside a multi-word English term become underscores.
        enWords.append(spaceRe.sub("_", enMatched.strip()))
        _, stop = enMatchObj.span()
        # Normalize separators in the Chinese part to single semicolons.
        cnWord = spaceSemicolonRe.sub(";", word[stop:].strip())
        cnWord = moreSemicolonRe.sub(";", cnWord)
        cnWords.append(cnWord)

    return [
        " ".join(cnWords) + "\n",
        " ".join(enWords) + "\n",
        "".join(ybWords) + "\n",
    ]


# startDate = "0930"


def createBookDailyFiles(book, config):
    """Generate one daily word file per chunk of the book's content.

    Blank lines and comment lines (those whose stripped text starts with
    the configured comment marker) are dropped before chunking.

    Args:
        book: book identifier whose content will be split into daily files.
        config: dict-like config providing "commentLineMark" and "dailyCount".

    Returns:
        True on success.
    """
    rawLines = common.getBookContent(book, config)
    commentMark = config.get("commentLineMark")

    # Keep only meaningful lines: non-blank and not comments.
    wordLines = []
    for rawLine in rawLines:
        stripped = rawLine.strip()
        if stripped and not stripped.startswith(commentMark):
            wordLines.append(rawLine)

    dailyWordList = toChunks(wordLines, config.get("dailyCount"))
    for index, words in enumerate(dailyWordList):
        createWordFile(words, index, book, config)

    print()
    print(chalk.yellow("单词集合 %s 已成功生成对应单词文件" % book))

    return True


if __name__ == "__main__":
    # Placeholder entry point: running this module directly only prints a
    # marker; real generation happens via createBookDailyFiles from callers.
    # createBookDailyFiles()
    print("test code here")
