import json
import os
from elasticsearch import Elasticsearch
# from elasticsearch.helpers import bulk
from elasticsearch import helpers
from lxml import etree
import re
import time
from datetime import datetime


def import_to_db():
    """Index QQ TV records from the tab-separated file "qq_tv" into ES.

    Each input line is expected as: name<TAB>id<TAB>json, where the JSON
    payload carries at least a 'url' key.  Documents are indexed one at a
    time into index "qqtv" (doc_type "tv_info"), keyed by the TV id, and
    each indexing response is printed.
    """
    es = Elasticsearch(os.environ['ES_ENDPOINT'])

    # 'with' guarantees the file is closed even if indexing raises
    # (the original opened the file and never closed it).
    with open("qq_tv") as f:
        for line in f:
            fields = line.split('\t')
            json_obj = json.loads(fields[2])
            tv = {
                'tv_name': fields[0],
                'tv_id': fields[1],
                'tv_url': json_obj['url'],
            }
            res = es.index(index="qqtv", doc_type='tv_info',
                           body=tv, id=fields[1])
            print(res)


# curl -XDELETE 'http://localhost:9200/stackoverflow/cpp'
def import_cpp_bulk():
    # es = Elasticsearch(http_auth=('elastic', 'changeme'), timeout=2)
    # es = Elasticsearch(os.environ['ES_ENDPOINT'])
    # actions = []

    i = 0
    f = open("java_question")
    f2 = open('testJson.json', 'a', encoding="utf8")
    for line in f:
        line = etree.HTML(line)
        # line2 = line.xpath("//row")[0].attrib
        each = line.xpath("//row")[0]
        tempDict = each.attrib
        re.findall('<([^<>]*)>', tempDict['tag'])
        tempDict['tag'] = tempDict['tag']
        # print(line2["posttypeid"])
        # print(type(line2))
        # for childNode in each.getchildren():
        #     tempDict[childNode.tag] = childNode.text
        # print(type(tempDict))
        # print(tempDict)
        encode_json = json.dumps(dict(tempDict), ensure_ascii=False)
        f2.write(encode_json + "\n")
        # decode_json = json.loads(encode_json)
        # pattern = re.compile('<[^>]+>')
        # line2 = re.sub(pattern, "", line2)
        # line2 = line2.replace("<[^>]+>", "")
        # print(line2)
        # line = ET.parse(line)
        # # line = line.replace(pattern, "")
        # line = re.sub(pattern, "", line)
        # #
        # line2 = line[6:-4]
        # # line2 = line2.replace('\"', '\'')
        # line2 = line2.replace('=', '\"=')
        # line2 = line2.replace('\" ', '\",\"')
        # line2 = line2.strip()  # .split('\t')
        # line2 = line2.replace('=', ':')
        # line2 = "\"" + line2
        # # line2 = line2.replace('\"', '\'')
        # line2 = "{" + line2 + "}"
        # line2 = line2.strip()
        # # line2 = line2[1:]
        # print(line2)
        # line3 = eval(line2)
        # json_obj = json.loads(line)
        # print(PostTypeId)
        # print(line2.PostTypeId)
        # print(line3)
        # print(line3['PostTypeId'])
        # print(line3['LastEditorUserId'])
        i = i + 1
        if i % 10000 == 0:
            print(i)
        # line = line.strip().split(' ')
        # action = {
        #     "_index": "stackoverflow",
        #     "_type": "cpp",
        #     "_id": decode_json['id'],
        #     "_source": decode_json
        # }
        # actions.append(action)
        # if(len(actions) == 5000):
        #     helpers.bulk(es, actions)
        #     del actions[0:len(actions)]
    f2.close()
    # if (len(actions) > 0):
    #     helpers.bulk(es, actions)


def import_java_bulk():
    """Bulk-index Stack Overflow Java questions from ``../java_data``.

    Creates the 'stackoverflow' index with an explicit mapping if it does
    not already exist, then streams JSON-lines documents into it in
    batches of 10,000 via ``helpers.bulk``, printing a running count and
    a rough docs/second figure per batch.  Documents are keyed by their
    'Id' field.
    """
    INDEX_NAME = 'stackoverflow'
    TYPE_NAME = 'java'
    BATCH_SIZE = 10000
    es = Elasticsearch(http_auth=('elastic', 'changeme'), timeout=10)
    if not es.indices.exists(index=INDEX_NAME):
        es.indices.create(
            index=INDEX_NAME,
            body={
                'settings': {
                    # small cluster: several shards, no replicas
                    'number_of_shards': 5,
                    'number_of_replicas': 0,
                },
                'mappings': {
                    TYPE_NAME: {
                        # _all disabled: queries target explicit fields
                        "_all": {"enabled": False},
                        "properties": {
                            # Title weighted above Body, Tags above both
                            'Title': {
                                'boost': 2,
                                'index': 'analyzed',
                                'store': 'yes',
                                'type': 'text',
                                "analyzer": "english",
                            },
                            'Body': {
                                'boost': 1.0,
                                'index': 'analyzed',
                                'store': 'yes',
                                'type': 'text',
                                "analyzer": "english",
                            },
                            "Tags": {
                                'boost': 3,
                                "type": "text",
                                "analyzer": "english",
                            },

                            "LastActivityDate": {"type": "date"},
                            "CreationDate": {"type": "date"},
                            "LastEditDate": {"type": "date"},
                            "ClosedDate": {"type": "date"},
                            'CommunityOwnedDate': {"type": "date"},

                            "CommentCount": {"type": "integer"},
                            "ViewCount": {"type": "integer"},
                            "AnswerCount": {"type": "integer"},
                            "FavoriteCount": {"type": "integer"},
                            "Score": {"type": "integer"},

                            "Id": {"type": "keyword"},
                            "PostTypeId": {"type": "keyword"},
                            "OwnerUserId": {"type": "keyword"},
                            "LastEditorUserId": {"type": "keyword"},
                            "AcceptedAnswerId": {"type": "keyword"},
                            "LastEditorDisplayName": {"type": "keyword"},
                            "OwnerDisplayName": {"type": "keyword"}
                        }
                    }
                }
            },
            # Will ignore 400 errors, remove to ensure you're prompted
            # ignore=400
        )

    actions = []
    batches = 0
    time_start = time.time()
    # 'with' closes the file even if bulk indexing raises (the original
    # only closed it on the success path).
    with open("../java_data") as f:
        for line in f:
            doc = json.loads(line)
            actions.append({
                "_index": INDEX_NAME,
                "_type": TYPE_NAME,
                "_id": doc['Id'],
                "_source": doc,
            })
            if len(actions) == BATCH_SIZE:
                batches += 1
                now = time.time()
                spend_time = now - time_start
                time_start = now
                print(batches * BATCH_SIZE, "  ", BATCH_SIZE / spend_time)
                helpers.bulk(es, actions)
                actions.clear()
    # flush the final partial batch, if any
    if actions:
        helpers.bulk(es, actions)


def import_java_ans():
    """Bulk-index Stack Overflow Java answers from ``../java_ans``.

    Ensures the 'answer' index exists (5 shards, no replicas) and that
    the 'java' doc_type has its mapping installed, then streams
    JSON-lines documents in batches of 10,000 via ``helpers.bulk``,
    printing a running count and a rough docs/second figure per batch.
    Documents are keyed by their 'Id' field.
    """
    INDEX_NAME = 'answer'
    TYPE_NAME = 'java'
    BATCH_SIZE = 10000
    es = Elasticsearch(http_auth=('elastic', 'changeme'), timeout=10)

    if not es.indices.exists(index=INDEX_NAME):
        index_body = {
            'settings': {
                'number_of_shards': 5,
                'number_of_replicas': 0,
            },
        }
        es.indices.create(index=INDEX_NAME, body=index_body)
    if not es.indices.exists_type(index=INDEX_NAME, doc_type=TYPE_NAME):
        # Only Id and ParentId are searchable; everything else is stored
        # for retrieval but not indexed.
        requestbody = {
            "_all": {"enabled": False},
            "properties": {
                'Id': {
                    'boost': 1.0,
                    'index': True,
                    'store': False,
                    'type': 'keyword'
                },
                'Body': {
                    'index': False,
                    'type': 'text',
                    "analyzer": "english"
                },
                "LastActivityDate": {
                    "type": "date",
                    'index': False,
                },

                "CommentCount": {
                    "type": "integer",
                    'index': False,
                },
                "Score": {
                    "type": "integer",
                    'index': False,
                },
                # ParentId links an answer back to its question
                "ParentId": {
                    "type": "keyword",
                    'index': True,
                },
                "PostTypeId": {
                    "type": "keyword",
                    'index': False,
                },
                "OwnerUserId": {
                    "type": "keyword",
                    'index': False,
                },
                "LastEditorUserId": {
                    "type": "keyword",
                    'index': False,
                },
                "LastEditorDisplayName": {
                    "type": "keyword",
                    'index': False,
                }
            }
        }
        es.indices.put_mapping(index=INDEX_NAME,
                               doc_type=TYPE_NAME, body=requestbody)

    actions = []
    batches = 0
    time_start = time.time()
    # 'with' closes the file even if bulk indexing raises (the original
    # only closed it on the success path).
    with open("../java_ans") as f:
        for line in f:
            doc = json.loads(line)
            actions.append({
                "_index": INDEX_NAME,
                "_type": TYPE_NAME,
                "_id": doc['Id'],
                "_source": doc,
            })
            if len(actions) == BATCH_SIZE:
                batches += 1
                now = time.time()
                spend_time = now - time_start
                time_start = now
                print(batches * BATCH_SIZE, "  ", BATCH_SIZE / spend_time)
                helpers.bulk(es, actions)
                actions.clear()
    # flush the final partial batch, if any
    if actions:
        helpers.bulk(es, actions)


def main():
    """Entry point: run the answer-import stage.

    The earlier stages (import_to_db, import_cpp_bulk, import_java_bulk)
    are intentionally disabled; re-enable them here as needed.
    """
    # import_to_db()
    # import_cpp_bulk()
    # import_java_bulk()
    import_java_ans()


if __name__ == "__main__":
    main()

# cat /home/danson/文档/Proj/elastic_search/elastic_search/java_question |
# ./test.py > java_data


# When building the website we needed to strip HTML tags: match the tags with a regex, then replace them.
# public static string ReplaceHtmlTag(string html, int length = 0)
# {
#     string strText = System.Text.RegularExpressions.Regex.Replace(html, "<[^>]+>", "");
#     strText = System.Text.RegularExpressions.Regex.Replace(strText, "&[^;]+;", "");
#     if (length > 0  && strText.Length > length)
#         return strText.Substring(0, length);
#     return strText;
