# -*- coding: utf-8 -*-
import os, re, json, xlwt, linecache
from wordseg_python import Global
from db import get_conn

# Directory containing this script; the word-frequency dict lives beside it.
prefix = os.path.dirname(os.path.realpath(__file__))
dict_path = os.path.join(prefix, 'dict.json')


def split(sent):
    """Tokenize a UTF-8 byte string into a list of UTF-8 words.

    The segmenter (Global.GetTokenPos) works on GBK, so the text is
    round-tripped utf-8 -> gbk -> utf-8; characters that do not survive
    the conversion are silently dropped ('ignore').

    :param sent: UTF-8 encoded byte string.
    :return: list of UTF-8 encoded word byte strings (POS tags discarded).
    """
    sent = sent.replace(' ', '').decode('utf-8', 'ignore').encode('gbk', 'ignore')
    # GetTokenPos yields (word, pos) pairs; only the words are needed,
    # so build the word list directly instead of an intermediate tuple list.
    return [word.decode('gbk').encode('utf-8') for word, _ in Global.GetTokenPos(sent)]


def read_file(url):
    """Return the stripped lines of the text file at *url*.

    Uses linecache, so repeated reads of the same path are served from
    its cache. A missing/unreadable file yields an empty list
    (linecache.getlines returns [] on failure).

    :param url: path to the file.
    :return: list of lines with surrounding whitespace removed.
    """
    # Comprehension instead of a manual append loop; also avoids
    # shadowing the builtin name `file`.
    return [line.strip() for line in linecache.getlines(url)]


def get_data():
    """Fetch candidate posts for word-dict generation.

    Selects rows from task_803842 that have at least one picture, are
    not long-text posts, and have non-empty text content.

    :return: tuple of (id, pics, post_content_txt) rows.
    """
    conn = get_conn()
    cursor = conn.cursor()

    sql = 'SELECT id, pics, post_content_txt FROM task_803842 WHERE pics <> "[]" AND isLongText = "false" AND post_content_txt != "";'

    cursor.execute(sql)
    results = cursor.fetchall()

    # Close resources — the original leaked both cursor and connection.
    cursor.close()
    conn.close()
    return results


def get_img(start_id=1):
    """Fetch filtered-image rows starting from a given id.

    :param start_id: inclusive lower bound on the row id (default 1).
    :return: tuple of (id, img_id, context, img_url) rows.
    """
    conn = get_conn()
    cursor = conn.cursor()

    # Bind the parameter through the driver instead of interpolating it
    # into the SQL string — consistent with insert_data and safe against
    # SQL injection. str() preserves the original string comparison.
    sql = 'SELECT id, img_id, context, img_url FROM image_after_filter WHERE id >= %s;'
    cursor.execute(sql, (str(start_id),))
    results = cursor.fetchall()

    cursor.close()
    conn.close()
    return results


def insert_data(id, context, df_score, img_url):
    """Insert one record into image_after_filter (best effort).

    A failed INSERT is logged and swallowed so batch processing can
    continue; the connection is committed and closed either way.

    :param id: image id (parameter name kept for callers; shadows the
        builtin `id` inside this function only).
    :param context: post text associated with the image.
    :param df_score: duplicate-filter score.
    :param img_url: URL of the image.
    """
    conn = get_conn()
    cursor = conn.cursor()

    sql = 'INSERT INTO image_after_filter(img_id, context, df_score, img_url)' \
          ' VALUES (%s, %s, %s, %s)'
    param = (id, context, df_score, img_url)

    try:
        cursor.execute(sql, param)
    except Exception as e:
        # `except Exception, e` is Py2-only syntax and `e.message` is
        # deprecated/removed; `as e` + print(e) works on Py2 and Py3.
        print(e)

    conn.commit()
    cursor.close()
    conn.close()


def loadJson(url):
    """Parse the JSON file at *url* and return the decoded object."""
    jsonFile = open(url)
    try:
        return json.load(jsonFile)
    finally:
        jsonFile.close()


def generate_dict():
    """Build a frequency dict of Chinese words over all fetched posts.

    Tokenizes every post from get_data(), keeps tokens that start with
    CJK ideographs, and counts occurrences.

    Side effects: writes the raw counts to dict_path and a sorted,
    human-readable copy to 'show.json'.

    :return: dict mapping UTF-8 word -> occurrence count.
    """
    all_words = {}
    # Hoisted out of the loops: the original recompiled this regex for
    # every single word.
    han_pattern = re.compile(u"[\u4e00-\u9fa5]+")

    for item in get_data():
        row_id = str(item[0])
        print(row_id)  # progress indicator
        img_urls = json.loads(item[1])  # parsed for validation; value unused here
        context = item[2].encode('utf-8')

        for word in split(context):
            # keep only tokens beginning with a CJK ideograph
            if han_pattern.match(word.decode('utf-8')):
                all_words[word] = all_words.get(word, 0) + 1

    with open(dict_path, 'w') as f:
        json.dump(all_words, f)

    out_words = sorted(all_words.items(), key=lambda item: -item[1])
    with open('show.json', 'w') as f:
        json.dump(obj=out_words, indent=4, fp=f, ensure_ascii=False)
    return all_words


def black_list(context):
    """Strip boilerplate from post text.

    Removes #hashtags#, the literal '网页链接' (web-link placeholder),
    @mentions (up to the following space), and finally all spaces.

    :param context: raw post text.
    :return: cleaned text with the above patterns removed.
    """
    # Non-greedy match: the original greedy '#(.+)#' deleted everything
    # between the FIRST and LAST '#' of the post, wiping real text when
    # a post contained two or more hashtags. Mirrors the non-greedy
    # @-mention rule below.
    context = re.sub('#(.+?)#', '', context)
    context = re.sub('网页链接', '', context)
    context = re.sub('@(.+?) ', '', context)
    return context.replace(' ', '')


def dump(file_name):
    """Export every row of image_after_filter to an .xls workbook.

    :param file_name: output file name, placed under <prefix>/output/.
    :return: full path of the written workbook.
    """
    # os.path.join instead of manual '/' concatenation.
    url = os.path.join(prefix, 'output', file_name)

    conn = get_conn()
    cursor = conn.cursor()

    sql = 'SELECT * ' \
          'FROM image_after_filter;'
    cursor.execute(sql)

    results = cursor.fetchall()
    fields = cursor.description

    # Close DB resources as soon as the data is in memory — the original
    # leaked both cursor and connection.
    cursor.close()
    conn.close()

    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('data', cell_overwrite_ok=True)

    # Header row (row 0): column names from the cursor description.
    for col, field in enumerate(fields):
        sheet.write(0, col, field[0])

    # Data rows start at 1, right below the header.
    for row, record in enumerate(results, 1):
        for col in range(len(fields)):
            sheet.write(row, col, u'%s' % record[col])

    workbook.save(url)
    return url
