12 | 12 | # See the License for the specific language governing permissions and
13 | 13 | # limitations under the License.
14 | 14 |
15 |    | -# import paddle
16 |    | -
17 |    | -
18 |    | -# def matmul_hadU(X):
19 |    | -
20 |    | -#     input = X.clone().reshape((-1, X.shape[-1], 1))
21 |    | -#     output = input.clone()
22 |    | -#     while input.shape[1] > 1:
23 |    | -#         input = input.reshape((input.shape[0], input.shape[1] // 2, 2, input.shape[2]))
24 |    | -#         output = output.reshape(input.shape)
25 |    | -#         output[:, :, 0, :] = input[:, :, 0, :] + input[:, :, 1, :]
26 |    | -#         output[:, :, 1, :] = input[:, :, 0, :] - input[:, :, 1, :]
27 |    | -#         output = output.reshape((input.shape[0], input.shape[1], -1))
28 |    | -#         (input, output) = (output, input)
29 |    | -#     del output
30 |    | -
31 |    | -#     return input.reshape(X.shape)
32 |    | -
33 |    | -
34 |    | -# def random_hadamard_matrix(size, dtype, is_block=False):
35 |    | -#     if not is_block:
36 |    | -#         A = paddle.randint(low=0, high=2, shape=[size, size]).astype("float32") * 2 - 1
37 |    | -#         Q, _ = paddle.linalg.qr(A)
38 |    | -#         return Q.astype(dtype), 1
39 |    | -#     else:
40 |    | -#         num_blocks = size
41 |    | -#         while not (num_blocks % 2):
42 |    | -#             num_blocks = num_blocks // 2
43 |    | -#         block_size = size // num_blocks
44 |    | -#         Q = paddle.diag(paddle.ones((block_size,), dtype="float32"))
45 |    | -#         block = matmul_hadU(Q)
46 |    | -#         large_matrix = paddle.zeros([size, size])
47 |    | -
48 |    | -#         for i in range(num_blocks):
49 |    | -#             start_row = i * block_size
50 |    | -#             start_col = i * block_size
51 |    | -#             large_matrix[start_row : start_row + block_size, start_col : start_col + block_size] = block
52 |    | -#         return large_matrix.cast(dtype), block_size
53 |    | -
54 | 15 | import paddle
55 | 16 |
56 | 17 | from paddlenlp.utils import infohub
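For reviewer context, below is a minimal NumPy sketch of what the removed, commented-out helpers computed: matmul_hadU was an unnormalized fast Walsh-Hadamard transform over the last axis (which must be a power of two in length), and the is_block=True branch of random_hadamard_matrix tiled the Hadamard matrix of the largest power-of-two factor of size along the diagonal of a size x size matrix. The names fwht_last_axis and block_hadamard are hypothetical stand-ins for this sketch, not PaddleNLP APIs, and NumPy is used in place of paddle purely to keep the example self-contained.

```python
# Reviewer sketch, not part of the diff: a NumPy rendering of the removed helpers.
import numpy as np


def fwht_last_axis(x: np.ndarray) -> np.ndarray:
    """Unnormalized fast Walsh-Hadamard transform over the last axis.

    The last dimension must be a power of two; this is the same
    add/subtract butterfly that the removed matmul_hadU expressed
    with repeated reshapes.
    """
    out = x.astype(np.float64).reshape(-1, x.shape[-1]).copy()
    n = out.shape[-1]
    assert n & (n - 1) == 0, "last axis must be a power of two"
    h = 1
    while h < n:
        for start in range(0, n, 2 * h):
            a = out[:, start : start + h].copy()
            b = out[:, start + h : start + 2 * h].copy()
            out[:, start : start + h] = a + b
            out[:, start + h : start + 2 * h] = a - b
        h *= 2
    return out.reshape(x.shape)


def block_hadamard(size: int) -> tuple[np.ndarray, int]:
    """Block-diagonal Hadamard matrix, as in the removed is_block=True branch.

    block_size is the largest power-of-two factor of size; each diagonal
    block is the (unnormalized) Hadamard matrix of that size.
    """
    num_blocks = size
    while num_blocks % 2 == 0:
        num_blocks //= 2
    block_size = size // num_blocks
    # Applying the transform to the identity yields the Sylvester Hadamard matrix.
    block = fwht_last_axis(np.eye(block_size))
    large = np.zeros((size, size))
    for i in range(num_blocks):
        s = i * block_size
        large[s : s + block_size, s : s + block_size] = block
    return large, block_size


if __name__ == "__main__":
    # H_4 built from the identity matches the Sylvester recursion, and
    # H_4 @ H_4.T == 4 * I (Hadamard orthogonality up to scale).
    H4 = fwht_last_axis(np.eye(4))
    print(H4.astype(int))
    print(np.allclose(H4 @ H4.T, 4 * np.eye(4)))
```

The non-block branch of the removed random_hadamard_matrix did something simpler: it QR-factorized a random +/-1 matrix and returned the orthogonal factor together with a block size of 1.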