{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10406.263945578237\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import librosa\n",
    "import numpy as np\n",
    "path = \"./9nine/nimi_sora/reference_audio\"\n",
    "files = os.listdir(path)\n",
    "time = 0\n",
    "for file in files:\n",
    "    data, fs = librosa.load(path + \"/\" + file)\n",
    "    time += len(data) / fs\n",
    "print(time)"
   ]
  },
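  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Decoding every waveform just to count seconds is slow. As a cross-check, the minimal sketch below reads only the file headers via `soundfile` (it assumes `soundfile` is installed and every file in the directory is an audio format it can read):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: sum durations from the audio headers instead of\n",
    "# decoding each waveform. Assumes `soundfile` can read every file.\n",
    "import os\n",
    "\n",
    "import soundfile as sf\n",
    "\n",
    "path = \"./9nine/nimi_sora/reference_audio\"\n",
    "print(sum(sf.info(os.path.join(path, f)).duration for f in os.listdir(path)))"
   ]
  },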
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2.890627777777778"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "10406.26/60/60"
   ]
  },
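  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The division above gives hours as a decimal (about 2.89 h). A small helper renders the same total as H:MM:SS; `format_duration` is our own hypothetical name, not part of the repo:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: format the measured duration as H:MM:SS.\n",
    "# `format_duration` is a hypothetical helper, not part of GPT-SoVITS.\n",
    "def format_duration(seconds: float) -> str:\n",
    "    h, rem = divmod(int(seconds), 3600)\n",
    "    m, s = divmod(rem, 60)\n",
    "    return f\"{h}:{m:02d}:{s:02d}\"\n",
    "\n",
    "print(format_duration(10406.26))  # 2:53:26"
   ]
  },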
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False, False,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False, False, False,  True,  True,  True],\n",
      "        [False, False, False, False, False, False, False, False,  True,  True],\n",
      "        [False, False, False, False, False, False, False, False, False,  True],\n",
      "        [False, False, False, False, False, False, False, False, False, False]])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "x = torch.randn(3, 4, 5)\n",
    "y = torch.randn(3, 6, 5)\n",
    "x_len = x.shape[1]\n",
    "y_len = y.shape[1]\n",
    "\n",
    "x_attn_mask = F.pad(\n",
    "    torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),\n",
    "    (0, y_len),\n",
    "    value=True, )\n",
    "y_attn_mask = F.pad(\n",
    "    torch.triu(\n",
    "        torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),\n",
    "        diagonal=1, ),\n",
    "    (x_len, 0),\n",
    "    value=False, )\n",
    "xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)\n",
    "bsz, src_len = x.shape[0], x_len + y_len\n",
    "_xy_padding_mask = (ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n",
    "                    .expand(-1, self.num_head, -1, -1)\n",
    "                    .reshape(bsz * self.num_head, 1, src_len))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False,  True,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False,  True,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False, False,  True,  True,  True,  True],\n",
      "        [False, False, False, False, False, False, False,  True,  True,  True],\n",
      "        [False, False, False, False, False, False, False, False,  True,  True],\n",
      "        [False, False, False, False, False, False, False, False, False,  True],\n",
      "        [False, False, False, False, False, False, False, False, False, False]])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from AR.models.utils import make_pad_mask\n",
    "#         self.ar_text_embedding = TokenEmbedding(\n",
    "#            self.embedding_dim, self.phoneme_vocab_size, self.p_dropout) vocab_size = 512, embedding_dim = 512\n",
    "\n",
    "# x = x + self.bert_proj(bert_feature.transpose(1,2))\n",
    "# x = self.ar_text_position(x)\n",
    "# x_mask = make_pad_mask(x_lens)\n",
    "\n",
    "# y_mask = make_pad_mask(y_lens)\n",
    "# y_mask_int = y_mask.type(torch.int64)\n",
    "# codes = y.type(torch.int64) * (1 - y_mask_int)\n",
    "\n",
    "# Training\n",
    "# AR Decoder\n",
    "# 将x(文本)和y(音频)的token拼起来,emb维度是512\n",
    "# 测试一个简单的case\n",
    "num_head = 8\n",
    "\n",
    "\n",
    "x_len = 4\n",
    "y_len = 6\n",
    "\n",
    "x_attn_mask = F.pad(\n",
    "    torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),\n",
    "    (0, y_len),\n",
    "    value=True, )\n",
    "y_attn_mask = F.pad(\n",
    "    torch.triu(\n",
    "        torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),\n",
    "        diagonal=1, ),\n",
    "    (x_len, 0),\n",
    "    value=False, )\n",
    "xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)\n",
    "bsz, src_len = x.shape[0], x_len + y_len\n",
    "print(xy_attn_mask)\n",
    "# _xy_padding_mask = (ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n",
    "#                     .expand(-1, num_head, -1, -1)\n",
    "#                     .reshape(bsz * num_head, 1, src_len))\n",
    "# xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)\n",
    "# new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)\n",
    "# new_attn_mask.masked_fill_(xy_attn_mask, float(\"-inf\"))\n",
    "# xy_attn_mask = new_attn_mask"
   ]
  },
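  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The tail of the previous cell stays commented out because `ar_xy_padding_mask` only exists inside the model. The sketch below fabricates a padding mask (two padded steps in batch 0; the values are arbitrary) to show how t2s_model folds padding into the causal mask and converts the boolean mask into an additive float mask:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A runnable sketch of the commented-out combination step, reusing\n",
    "# bsz, num_head, src_len and xy_attn_mask from the cell above.\n",
    "# ar_xy_padding_mask is fabricated purely for illustration; in\n",
    "# t2s_model it is derived from the real x/y lengths.\n",
    "ar_xy_padding_mask = torch.zeros(bsz, src_len, dtype=torch.bool)\n",
    "ar_xy_padding_mask[0, -2:] = True  # pretend batch 0 ends in two pad steps\n",
    "\n",
    "_xy_padding_mask = (ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n",
    "                    .expand(-1, num_head, -1, -1)\n",
    "                    .reshape(bsz * num_head, 1, src_len))\n",
    "combined = xy_attn_mask.logical_or(_xy_padding_mask)\n",
    "new_attn_mask = torch.zeros_like(combined, dtype=torch.float32)\n",
    "new_attn_mask.masked_fill_(combined, float(\"-inf\"))  # 0 = attend, -inf = blocked\n",
    "print(new_attn_mask.shape)  # (bsz * num_head, src_len, src_len)"
   ]
  },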
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([324, 192])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import AR.models.t2s_model\n",
    "# model = AR.models.t2s_model.Text2SemanticDecoder()\n",
    "dict = torch.load(r\"D:\\pyprojs\\GPT-SoVITSs\\fork\\GPT-SoVITS-NIMI_SORA\\9nine\\nimi_sora\\sora_e5_s3275.pth\")\n",
    "dict[\"weight\"][\"enc_p.text_embedding.weight\"].shape"
   ]
  }
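  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To see what else the checkpoint holds, the state dict can be walked directly. A sketch, assuming (as the cell above does) that the tensors live under the `weight` key:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: list the first few parameter names and shapes in the\n",
    "# checkpoint, reusing `ckpt` from the cell above.\n",
    "for name, tensor in list(ckpt[\"weight\"].items())[:10]:\n",
    "    print(name, tuple(tensor.shape))"
   ]
  }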
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "vits",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}