import torch
from torch import nn
from elasticsearch_dsl import Date,Text,Keyword,Integer,Document,Long

class HotType(Document):
    """Elasticsearch document mapping for a trending ("hot") search entry."""
    # 'ik_max_word' is the IK Chinese-tokenizer analyzer plugin; the ES
    # cluster must have it installed or index creation will fail.
    title = Text(analyzer='ik_max_word')
    # Heat/popularity score.
    hot = Integer()
    # Presumably an epoch timestamp — TODO confirm units (seconds vs ms).
    created_time = Long()

class MBlogType(Document):
    """Elasticsearch document mapping for a microblog (Weibo-style) post."""
    # Post identifier; Keyword = exact-match, not analyzed.
    mid = Keyword()
    topic = Keyword()
    # Full post body, tokenized with the IK Chinese analyzer for search.
    text = Text(analyzer='ik_max_word')
    # Numeric timestamp — presumably epoch time; TODO confirm units.
    created_time = Long()
    # Human-readable form of the creation time, stored verbatim.
    created_time_text = Keyword()
    author = Keyword()
    comments_count = Integer()
    reposts_count = Integer()
    attitudes_count = Integer()
    # Client/app the post was made from.
    source = Keyword()
    # Free-text location, searchable; province kept separately as exact-match.
    location = Text(analyzer='ik_max_word')
    province = Keyword()
    link = Keyword()
    # Sentiment label as an integer code — meaning assigned by the classifier
    # elsewhere in the project; verify the label set against the producer.
    sentiment = Integer()

class LSTM(nn.Module):
    """Bidirectional LSTM binary classifier over padded variable-length sequences.

    Runs a bidirectional LSTM over a padded batch (packed with the true
    sequence lengths), concatenates the final forward/backward hidden states
    of the top layer, and maps them through a linear head + sigmoid to one
    probability per sequence.
    """

    # Kept for backward compatibility with callers that read it; forward()
    # now follows the input tensor's device instead, so the model works
    # wherever the caller actually placed it.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    def __init__(self, input_size, hidden_size, num_layers):
        """
        Args:
            input_size: number of features per time step.
            hidden_size: LSTM hidden state size (per direction).
            num_layers: number of stacked LSTM layers.
        """
        # Bug fix: the original called super(self.LSTM, self).__init__(),
        # which raises AttributeError — no 'LSTM' attribute exists on the
        # instance before nn.Module is initialized.
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        # Bidirectional: forward + backward final states are concatenated,
        # so the classifier head sees hidden_size * 2 features.
        self.fc = nn.Linear(hidden_size * 2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, lengths):
        """
        Args:
            x: padded input of shape (batch, max_seq_len, input_size).
            lengths: true sequence lengths (Python list or CPU int64 tensor).
        Returns:
            Tensor of shape (batch, 1) with probabilities in (0, 1).
        """
        # Initial states live on the input's device. The original pinned them
        # to the class-level device, which crashed CPU inference on a machine
        # where CUDA happened to be available.
        num_directions = 2  # bidirectional, so the first state dim is layers*2
        h0 = torch.zeros(self.num_layers * num_directions, x.size(0),
                         self.hidden_size, device=x.device)
        c0 = torch.zeros_like(h0)

        # enforce_sorted=False generalizes to batches not sorted by length;
        # for already-sorted input the result is identical to the default.
        packed_input = nn.utils.rnn.pack_padded_sequence(
            x, lengths, batch_first=True, enforce_sorted=False)
        _, (h_n, c_n) = self.lstm(packed_input, (h0, c0))

        # h_n: (num_layers * 2, batch, hidden_size) — the last two slices are
        # the top layer's final forward and backward hidden states.
        last_hidden = torch.cat([h_n[-2], h_n[-1]], dim=1)
        out = self.sigmoid(self.fc(last_hidden))
        return out