openChatformer committed

Commit 4afb8ac
1 Parent(s): 4d00d64

yingbao ChatGLM model

pytorch_model-00006-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c40ff98941f60beb78e62571633f523167780c93ff60f07d245af6385ab9e281
+ size 1913133117
pytorch_model-00007-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d2929c696f0d2fc27166f9363407ef04108451fd8fc30ce4a38e08dc89b5fab
+ size 1878484277
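
Both .bin entries above are Git LFS pointer files rather than the weights themselves: each records only the SHA-256 oid and the byte size of a shard, which LFS fetches on checkout. A minimal sketch for checking a pulled shard against its pointer (the local path is an assumption; the oid and size are copied from the pointer for shard 6 of 7):

import hashlib

EXPECTED_OID = "c40ff98941f60beb78e62571633f523167780c93ff60f07d245af6385ab9e281"  # from the pointer above
EXPECTED_SIZE = 1913133117  # bytes, from the pointer above

def verify_lfs_blob(path: str, oid: str, size: int) -> bool:
    # Stream the file so multi-GB shards do not have to fit in memory.
    digest = hashlib.sha256()
    total = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == oid and total == size

# Assumes the shard was materialized locally, e.g. with `git lfs pull`.
print(verify_lfs_blob("pytorch_model-00006-of-00007.bin", EXPECTED_OID, EXPECTED_SIZE))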
quantization.py ADDED
@@ -0,0 +1,201 @@
+ from torch.nn import Linear
+ from torch.nn.parameter import Parameter
+
+ import bz2
+ import torch
+ import base64
+ import ctypes
+ from transformers.utils import logging
+
+ from typing import List
+ from functools import partial
+
+ logger = logging.get_logger(__name__)
+
+ try:
+     from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
+
+     class Kernel:
+         def __init__(self, code: bytes, function_names: List[str]):
+             self.code = code
+             self._function_names = function_names
+             self._cmodule = LazyKernelCModule(self.code)
+
+             for name in self._function_names:
+                 setattr(self, name, KernelFunction(self._cmodule, name))
+
+ quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+t
e+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyH
pix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
+
+     kernels = Kernel(
+         bz2.decompress(base64.b64decode(quantization_code)),
+         [
+             "int4WeightCompression",
+             "int4WeightExtractionFloat",
+             "int4WeightExtractionHalf",
+             "int8WeightExtractionFloat",
+             "int8WeightExtractionHalf",
+         ],
+     )
+ except Exception as exception:
+     kernels = None
+     logger.warning("Failed to load cpm_kernels:" + str(exception))
+
+
+ class W8A16Linear(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
+         ctx.inp_shape = inp.size()
+         ctx.weight_bit_width = weight_bit_width
+         out_features = quant_w.size(0)
+         inp = inp.contiguous().view(-1, inp.size(-1))
+         weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
+         ctx.weight_shape = weight.size()
+         output = inp.mm(weight.t())
+         ctx.save_for_backward(inp, quant_w, scale_w)
+         return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
+
+     @staticmethod
+     def backward(ctx, grad_output: torch.Tensor):
+         inp, quant_w, scale_w = ctx.saved_tensors
+         weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
+         grad_output = grad_output.contiguous().view(-1, weight.size(0))
+         grad_input = grad_output.mm(weight)
+         grad_weight = grad_output.t().mm(inp)
+         return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
+
+
+ def compress_int4_weight(weight: torch.Tensor): # (n, m)
+     with torch.cuda.device(weight.device):
+         n, m = weight.size(0), weight.size(1)
+         assert m % 2 == 0
+         m = m // 2
+         out = torch.empty(n, m, dtype=torch.int8, device="cuda")
+         stream = torch.cuda.current_stream()
+
+         gridDim = (n, 1, 1)
+         blockDim = (min(round_up(m, 32), 1024), 1, 1)
+
+         kernels.int4WeightCompression(
+             gridDim,
+             blockDim,
+             0,
+             stream,
+             [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
+         )
+         return out
+
+
+ def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
+     if source_bit_width == 8:
+         func = kernels.int8WeightExtractionHalf
+     elif source_bit_width == 4:
+         func = kernels.int4WeightExtractionHalf
+     else:
+         assert False, "Unsupported bit-width"
+
+     with torch.cuda.device(weight.device):
+         n, m = weight.size(0), weight.size(1)
+         out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda")
+         stream = torch.cuda.current_stream()
+
+         gridDim = (n, 1, 1)
+         blockDim = (min(round_up(m, 32), 1024), 1, 1)
+
+         func(
+             gridDim,
+             blockDim,
+             0,
+             stream,
+             [
+                 ctypes.c_void_p(weight.data_ptr()),
+                 ctypes.c_void_p(scale_list.data_ptr()),
+                 ctypes.c_void_p(out.data_ptr()),
+                 ctypes.c_int32(n),
+                 ctypes.c_int32(m),
+             ],
+         )
+         return out
+
+
+ class QuantizedLinear(Linear):
+     def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
+         super(QuantizedLinear, self).__init__(*args, **kwargs)
+         self.weight_bit_width = weight_bit_width
+
+         shape = self.weight.shape
+         del self.weight
+
+         if weight_tensor is None or empty_init:
+             self.weight = torch.empty(
+                 shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
+             )
+             self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
+         else:
+             self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
+             self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
+             if weight_bit_width == 4:
+                 self.weight = compress_int4_weight(self.weight)
+
+         self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
+         self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
+         if bias_tensor is not None:
+             self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
+         else:
+             self.bias = None
+
+     def forward(self, input):
+         output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
+         if self.bias is not None:
+             output = output + self.bias
+         return output
+
+
+ def quantize(model, weight_bit_width, empty_init=False, **kwargs):
+     """Replace fp16 linear with quantized linear"""
+
+     for layer in model.layers:
+         layer.attention.query_key_value = QuantizedLinear(
+             weight_bit_width=weight_bit_width,
+             weight_tensor=layer.attention.query_key_value.weight.to(torch.cuda.current_device()),
+             bias_tensor=layer.attention.query_key_value.bias,
+             in_features=layer.attention.query_key_value.in_features,
+             out_features=layer.attention.query_key_value.out_features,
+             bias=True,
+             dtype=torch.half,
+             device=layer.attention.query_key_value.weight.device,
+             empty_init=empty_init
+         )
+         layer.attention.dense = QuantizedLinear(
+             weight_bit_width=weight_bit_width,
+             weight_tensor=layer.attention.dense.weight.to(torch.cuda.current_device()),
+             bias_tensor=layer.attention.dense.bias,
+             in_features=layer.attention.dense.in_features,
+             out_features=layer.attention.dense.out_features,
+             bias=True,
+             dtype=torch.half,
+             device=layer.attention.dense.weight.device,
+             empty_init=empty_init
+         )
+         layer.mlp.dense_h_to_4h = QuantizedLinear(
+             weight_bit_width=weight_bit_width,
+             weight_tensor=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
+             bias_tensor=layer.mlp.dense_h_to_4h.bias,
+             in_features=layer.mlp.dense_h_to_4h.in_features,
+             out_features=layer.mlp.dense_h_to_4h.out_features,
+             bias=True,
+             dtype=torch.half,
+             device=layer.mlp.dense_h_to_4h.weight.device,
+             empty_init=empty_init
+         )
+         layer.mlp.dense_4h_to_h = QuantizedLinear(
+             weight_bit_width=weight_bit_width,
+             weight_tensor=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
+             bias_tensor=layer.mlp.dense_4h_to_h.bias,
+             in_features=layer.mlp.dense_4h_to_h.in_features,
+             out_features=layer.mlp.dense_4h_to_h.out_features,
+             bias=True,
+             dtype=torch.half,
+             device=layer.mlp.dense_4h_to_h.weight.device,
+             empty_init=empty_init
+         )
+     return model
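
quantization.py performs symmetric per-row weight quantization: QuantizedLinear scales each fp16 weight row by its absolute maximum over 2**(weight_bit_width - 1) - 1, rounds to int8 (packing two 4-bit values per byte via compress_int4_weight), and W8A16Linear dequantizes back to half precision through the embedded cpm_kernels CUDA code before the matmul. A CPU-only sketch of the same quantize/dequantize round trip, with illustrative names that are not part of the file:

import torch

def fake_quant_roundtrip(w: torch.Tensor, bit_width: int = 8) -> torch.Tensor:
    # Per-row scale, as in QuantizedLinear.__init__.
    scale = w.abs().max(dim=-1).values / (2 ** (bit_width - 1) - 1)
    q = torch.round(w / scale[:, None]).to(torch.int8)  # stored int weights
    # Dequantization, as in extract_weight_to_half (without bit packing).
    return q.to(w.dtype) * scale[:, None]

w = torch.randn(4, 8)
print((w - fake_quant_roundtrip(w, 8)).abs().max())  # small for 8-bit, larger for 4-bit

The entry point is quantize(model, weight_bit_width), which swaps the four Linear layers of every transformer block for QuantizedLinear. Note that the CUDA path requires cpm_kernels; otherwise kernels stays None and the extraction calls fail at runtime.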
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token": "<sop>",
+   "eos_token": "<eop>",
+   "mask_token": "[MASK]",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
tokenization_chatglm.py ADDED
@@ -0,0 +1,430 @@
+ """Tokenization classes for ChatGLM."""
+ from typing import List, Optional, Union
+ import os
+
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging, PaddingStrategy
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
+ from typing import Dict
+ import sentencepiece as spm
+ import numpy as np
+
+ logger = logging.get_logger(__name__)
+
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+     "THUDM/chatglm-6b": 2048,
+ }
+
+
+ class TextTokenizer:
+     def __init__(self, model_path):
+         self.sp = spm.SentencePieceProcessor()
+         self.sp.Load(model_path)
+         self.num_tokens = self.sp.vocab_size()
+
+     def encode(self, text):
+         return self.sp.EncodeAsIds(text)
+
+     def decode(self, ids: List[int]):
+         return self.sp.DecodeIds(ids)
+
+     def tokenize(self, text):
+         return self.sp.EncodeAsPieces(text)
+
+     def convert_tokens_to_ids(self, tokens):
+         return [self.sp.PieceToId(token) for token in tokens]
+
+     def convert_token_to_id(self, token):
+         return self.sp.PieceToId(token)
+
+     def convert_id_to_token(self, idx):
+         return self.sp.IdToPiece(idx)
+
+     def __len__(self):
+         return self.num_tokens
+
+
+ class SPTokenizer:
+     def __init__(
+         self,
+         vocab_file,
+         num_image_tokens=20000,
+         max_blank_length=80,
+         byte_fallback=True,
+     ):
+         assert vocab_file is not None
+         self.vocab_file = vocab_file
+         self.num_image_tokens = num_image_tokens
+         self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
+         self.max_blank_length = max_blank_length
+         self.byte_fallback = byte_fallback
+         self.text_tokenizer = TextTokenizer(vocab_file)
+
+     def _get_text_tokenizer(self):
+         return self.text_tokenizer
+
+     @staticmethod
+     def get_blank_token(length: int):
+         assert length >= 2
+         return f"<|blank_{length}|>"
+
+     @staticmethod
+     def get_tab_token():
+         return "<|tab|>"
+
+     @property
+     def num_text_tokens(self):
+         return self.text_tokenizer.num_tokens
+
+     @property
+     def num_tokens(self):
+         return self.num_image_tokens + self.num_text_tokens
+
+     @staticmethod
+     def _encode_whitespaces(text: str, max_len: int = 80):
+         text = text.replace("\t", SPTokenizer.get_tab_token())
+         for i in range(max_len, 1, -1):
+             text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
+         return text
+
+     def _preprocess(self, text: str, linebreak=True, whitespaces=True):
+         if linebreak:
+             text = text.replace("\n", "<n>")
+         if whitespaces:
+             text = self._encode_whitespaces(text, max_len=self.max_blank_length)
+         return text
+
+     def encode(
+         self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
+     ) -> List[int]:
+         """
+         @param text: Text to encode.
+         @param linebreak: Whether to encode newline (\n) in text.
+         @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
+         @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
+         @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
+         """
+         text = self._preprocess(text, linebreak, whitespaces)
+         if not add_dummy_prefix:
+             text = "<n>" + text
+         tmp = self._get_text_tokenizer().encode(text)
+         tokens = [x + self.num_image_tokens for x in tmp]
+         return tokens if add_dummy_prefix else tokens[2:]
+
+     def decode(self, text_ids: List[int]) -> str:
+         ids = [int(_id) - self.num_image_tokens for _id in text_ids]
+         ids = [_id for _id in ids if _id >= 0]
+         text = self._get_text_tokenizer().decode(ids)
+         text = text.replace("<n>", "\n")
+         text = text.replace(SPTokenizer.get_tab_token(), "\t")
+         for i in range(2, self.max_blank_length + 1):
+             text = text.replace(self.get_blank_token(i), " " * i)
+         return text
+
+     def tokenize(
+         self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
+     ) -> List[str]:
+         """
+         @param text: Text to encode.
+         @param linebreak: Whether to encode newline (\n) in text.
+         @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
+         @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
+         @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
+         """
+         text = self._preprocess(text, linebreak, whitespaces)
+         if not add_dummy_prefix:
+             text = "<n>" + text
+         tokens = self._get_text_tokenizer().tokenize(text)
+         return tokens if add_dummy_prefix else tokens[2:]
+
+     def __getitem__(self, x: Union[int, str]):
+         if isinstance(x, int):
+             if x < self.num_image_tokens:
+                 return "<image_{}>".format(x)
+             else:
+                 return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
+         elif isinstance(x, str):
+             if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
+                 return int(x[7:-1])
+             else:
+                 return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
+         else:
+             raise ValueError("The key should be str or int.")
+
+
+ class ChatGLMTokenizer(PreTrainedTokenizer):
+     """
+     Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = {"vocab_file": "ice_text.model"}
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask", "position_ids"]
+
+     def __init__(
+         self,
+         vocab_file,
+         do_lower_case=False,
+         remove_space=False,
+         bos_token='<sop>',
+         eos_token='<eop>',
+         end_token='</s>',
+         mask_token='[MASK]',
+         gmask_token='[gMASK]',
+         padding_side="left",
+         pad_token="<pad>",
+         unk_token="<unk>",
+         num_image_tokens=20000,
+         **kwargs
+     ) -> None:
+         super().__init__(
+             do_lower_case=do_lower_case,
+             remove_space=remove_space,
+             padding_side=padding_side,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             end_token=end_token,
+             mask_token=mask_token,
+             gmask_token=gmask_token,
+             pad_token=pad_token,
+             unk_token=unk_token,
+             num_image_tokens=num_image_tokens,
+             **kwargs
+         )
+
+         self.do_lower_case = do_lower_case
+         self.remove_space = remove_space
+         self.vocab_file = vocab_file
+
+         self.bos_token = bos_token
+         self.eos_token = eos_token
+         self.end_token = end_token
+         self.mask_token = mask_token
+         self.gmask_token = gmask_token
+
+         self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
+
+         """ Initialisation """
+
+     @property
+     def gmask_token_id(self) -> Optional[int]:
+         if self.gmask_token is None:
+             return None
+         return self.convert_tokens_to_ids(self.gmask_token)
+
+     @property
+     def end_token_id(self) -> Optional[int]:
+         """
+         `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been
+         set.
+         """
+         if self.end_token is None:
+             return None
+         return self.convert_tokens_to_ids(self.end_token)
+
+     @property
+     def vocab_size(self):
+         """ Returns vocab size """
+         return self.sp_tokenizer.num_tokens
+
+     def get_vocab(self):
+         """ Returns vocab as a dict """
+         vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def preprocess_text(self, inputs):
+         if self.remove_space:
+             outputs = " ".join(inputs.strip().split())
+         else:
+             outputs = inputs
+
+         if self.do_lower_case:
+             outputs = outputs.lower()
+
+         return outputs
+
+     def _tokenize(self, text, **kwargs):
+         """ Returns a tokenized string. """
+         text = self.preprocess_text(text)
+
+         seq = self.sp_tokenizer.tokenize(text)
+
+         return seq
+
+     def _decode(
+         self,
+         token_ids: Union[int, List[int]],
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: bool = True,
+         **kwargs
+     ) -> str:
+         if isinstance(token_ids, int):
+             token_ids = [token_ids]
+         if len(token_ids) == 0:
+             return ""
+         if self.pad_token_id in token_ids: # remove pad
+             token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
+         return self.sp_tokenizer.decode(token_ids)
+
+     def _convert_token_to_id(self, token):
+         """ Converts a token (str) in an id using the vocab. """
+         return self.sp_tokenizer[token]
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) in a token (str) using the vocab."""
+         return self.sp_tokenizer[index]
+
+     def save_vocabulary(self, save_directory, filename_prefix=None):
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+             filename_prefix (`str`, *optional*):
+                 An optional prefix to add to the names of the saved files.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory, self.vocab_files_names["vocab_file"]
+             )
+         else:
+             vocab_file = save_directory
+
+         with open(self.vocab_file, 'rb') as fin:
+             proto_str = fin.read()
+
+         with open(vocab_file, "wb") as writer:
+             writer.write(proto_str)
+
+         return (vocab_file,)
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences by concatenating and
+         adding special tokens. A ChatGLM sequence has the following format:
+
+         - single sequence: `X [gMASK] <sop>`
+         - pair of sequences: `A [gMASK] <sop> B <eop>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         gmask_id = self.sp_tokenizer[self.gmask_token]
+         eos_id = self.sp_tokenizer[self.eos_token]
+         token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]
+         if token_ids_1 is not None:
+             token_ids_0 = token_ids_0 + token_ids_1 + [eos_id]
+         return token_ids_0
+
+     def _pad(
+         self,
+         encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+         max_length: Optional[int] = None,
+         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+         pad_to_multiple_of: Optional[int] = None,
+         return_attention_mask: Optional[bool] = None,
+     ) -> dict:
+         """
+         Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+         Args:
+             encoded_inputs:
+                 Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+             max_length: maximum length of the returned list and optionally padding length (see below).
+                 Will truncate by taking into account the special tokens.
+             padding_strategy: PaddingStrategy to use for padding.
+
+                 - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
+                 - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+                 - PaddingStrategy.DO_NOT_PAD: Do not pad
+                 The tokenizer padding sides are defined in self.padding_side:
+
+                     - 'left': pads on the left of the sequences
+                     - 'right': pads on the right of the sequences
+             pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+                 This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+                 `>= 7.5` (Volta).
+             return_attention_mask:
+                 (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+         """
+         # Load from model defaults
+         bos_token_id = self.sp_tokenizer[self.bos_token]
+         mask_token_id = self.sp_tokenizer[self.mask_token]
+         gmask_token_id = self.sp_tokenizer[self.gmask_token]
+         assert self.padding_side == "left"
+
+         required_input = encoded_inputs[self.model_input_names[0]]
+         seq_length = len(required_input)
+
+         if padding_strategy == PaddingStrategy.LONGEST:
+             max_length = len(required_input)
+
+         if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+             max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+         needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+         # Initialize attention mask if not present.
+         if max_length is not None:
+             if "attention_mask" not in encoded_inputs:
+                 if bos_token_id in required_input:
+                     context_length = required_input.index(bos_token_id)
+                 else:
+                     context_length = seq_length
+                 attention_mask = np.ones((1, seq_length, seq_length))
+                 attention_mask = np.tril(attention_mask)
+                 attention_mask[:, :, :context_length] = 1
+                 attention_mask = np.bool_(attention_mask < 0.5)
+                 encoded_inputs["attention_mask"] = attention_mask
+
+             if "position_ids" not in encoded_inputs:
+                 if bos_token_id in required_input:
+                     context_length = required_input.index(bos_token_id)
+                 else:
+                     context_length = seq_length
+                 position_ids = np.arange(seq_length, dtype=np.int64)
+                 mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
+                 if mask_token in required_input:
+                     mask_position = required_input.index(mask_token)
+                     position_ids[context_length:] = mask_position
+                 block_position_ids = np.concatenate(
+                     [np.zeros(context_length, dtype=np.int64),
+                      np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
+                 encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
+
+         if needs_to_be_padded:
+             difference = max_length - len(required_input)
+
+             if "attention_mask" in encoded_inputs:
+                 encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
+                                                           pad_width=[(0, 0), (difference, 0), (difference, 0)],
+                                                           mode='constant', constant_values=True)
+             if "token_type_ids" in encoded_inputs:
+                 encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+                     "token_type_ids"
+                 ]
+             if "special_tokens_mask" in encoded_inputs:
+                 encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+             if "position_ids" in encoded_inputs:
+                 encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
+                                                         pad_width=[(0, 0), (difference, 0)])
+             encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+
+         return encoded_inputs
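
tokenization_chatglm.py wraps SentencePiece in two layers: SPTokenizer rewrites "\n" to <n> and runs of spaces/tabs to <|blank_k|>/<|tab|> markers before encoding (undoing them on decode), and shifts every text-token id up by num_image_tokens; ChatGLMTokenizer then appends [gMASK] and <sop> in build_inputs_with_special_tokens and builds the 2D attention mask and position ids in _pad. A usage sketch, assuming the full repository (including the ice_text.model vocabulary, which is not part of this commit) has been pulled into the current directory:

from transformers import AutoTokenizer

# trust_remote_code is required because ChatGLMTokenizer ships with the repo.
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

ids = tokenizer("def f(x):\n    return x")["input_ids"]
print(ids)                    # ends with the [gMASK] and <sop> ids
print(tokenizer.decode(ids))  # <n> and <|blank_k|> markers are reversed on decode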
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<sop>",
+   "do_lower_case": false,
+   "end_token": "</s>",
+   "eos_token": "<eop>",
+   "gmask_token": "[gMASK]",
+   "mask_token": "[MASK]",
+   "model_max_length": 2048,
+   "num_image_tokens": 0,
+   "pad_token": "<pad>",
+   "padding_side": "left",
+   "remove_space": false,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "unk_token": "<unk>"
+ }
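
tokenizer_config.json wires the custom class into transformers: with trust_remote_code=True, AutoTokenizer follows the auto_map entry to tokenization_chatglm.ChatGLMTokenizer (the null slot means no fast tokenizer is provided), and the remaining keys are forwarded to __init__ as kwargs. Note that num_image_tokens is 0 here, overriding the 20000 default and disabling the id offset in SPTokenizer. The class can also be constructed directly; a sketch assuming the ice_text.model file named in vocab_files_names is present locally:

from tokenization_chatglm import ChatGLMTokenizer

# ice_text.model is the SentencePiece model named in vocab_files_names.
tokenizer = ChatGLMTokenizer(vocab_file="ice_text.model", num_image_tokens=0)
print(tokenizer.vocab_size)  # SentencePiece vocab size plus the (here zero) image-token offset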