#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan  1 21:34:42 2018

@author: pc
"""
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """A minimal two-layer 2-D convolutional network.

    Expects input of shape (N, 1, H, W); each 5x5 conv (no padding)
    shrinks the spatial dims by 4, so the output is (N, 20, H-8, W-8).
    """

    def __init__(self):
        super().__init__()
        # 1 input channel -> 20 feature maps, then 20 -> 20, both 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        """Apply conv1 -> ReLU -> conv2 -> ReLU and return the result."""
        out = self.conv1(x)
        out = F.relu(out)
        out = self.conv2(out)
        return F.relu(out)


# Demo: nn.Conv1d and nn.MaxPool1d on random data.
# Signature: torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride=1,
#                            padding=0, dilation=1, groups=1, bias=True)
m = nn.Conv1d(16, 33, 3, stride=2)  # 33 convolution kernels

# 1-D convolution layer. Input shape (N, C_in, L_in), output (N, C_out, L_out):
#   N     = batch size
#   C_in  = in_channels (number of input channels per sample)
#   L_in  = length of each 1-D sequence
# L_out = floor((L_in - kernel_size) / stride) + 1 when padding=0, dilation=1.
#
# NOTE: autograd.Variable has been deprecated since PyTorch 0.4 — tensors
# carry autograd state themselves, so torch.randn is used directly.
inputs1 = torch.randn(20, 16, 50)  # L_in=50; L_out = (50-3)//2 + 1 = 24
output = m(inputs1)
print("Conv1d", output.size())  # expected: torch.Size([20, 33, 24])

# Max pooling with the same kernel/stride arithmetic: (50-3)//2 + 1 = 24.
m = nn.MaxPool1d(3, stride=2)
inputs2 = torch.randn(20, 16, 50)
output = m(inputs2)
print("MaxPool1d", output.size())  # expected: torch.Size([20, 16, 24])
