/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

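/*
 * AVX-512 accelerated Fletcher-4.  The input stream is spread across the
 * eight 64-bit lanes of the zmm registers, so every iteration consumes
 * eight 32-bit words; the per-lane running sums are folded back into the
 * scalar Fletcher-4 checksum when the context is finalized.
 */
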
#ifdef __amd64

#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include <sys/simd.h>
#include <sys/spa_checksum.h>
#include <zfs_fletcher.h>
#ifndef _KERNEL
#include <strings.h>
#endif

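/* Clear the four 8-lane accumulators before a new checksum run. */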
static void
fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
{
	bzero(ctx->avx512, 4 * sizeof (zfs_fletcher_avx512_t));
}

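/*
 * Fold the eight per-lane running sums back into the scalar Fletcher-4
 * values.  Each lane only saw every eighth input word, so the
 * higher-order sums (B, C, D) are reconstructed using the precomputed
 * correction coefficients below.
 */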
static void
fletcher_4_avx512f_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	static const uint64_t
	    CcA[] = {   0,   0,   1,   3,   6,  10,  15,  21 },
	    CcB[] = {  28,  36,  44,  52,  60,  68,  76,  84 },
	    DcA[] = {   0,   0,   0,   1,   4,  10,  20,  35 },
	    DcB[] = {  56,  84, 120, 164, 216, 276, 344, 420 },
	    DcC[] = { 448, 512, 576, 640, 704, 768, 832, 896 };

	uint64_t A, B, C, D;
	uint64_t i;

	A = ctx->avx512[0].v[0];
	B = 8 * ctx->avx512[1].v[0];
	C = 64 * ctx->avx512[2].v[0] - CcB[0] * ctx->avx512[1].v[0];
	D = 512 * ctx->avx512[3].v[0] - DcC[0] * ctx->avx512[2].v[0] +
	    DcB[0] * ctx->avx512[1].v[0];

	for (i = 1; i < 8; i++) {
		A += ctx->avx512[0].v[i];
		B += 8 * ctx->avx512[1].v[i] - i * ctx->avx512[0].v[i];
		C += 64 * ctx->avx512[2].v[i] - CcB[i] * ctx->avx512[1].v[i] +
		    CcA[i] * ctx->avx512[0].v[i];
		D += 512 * ctx->avx512[3].v[i] - DcC[i] * ctx->avx512[2].v[i] +
		    DcB[i] * ctx->avx512[1].v[i] - DcA[i] * ctx->avx512[0].v[i];
	}

	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

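/* Reload the four lane accumulators from the context into %zmm0-%zmm3. */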
#define	FLETCHER_4_AVX512_RESTORE_CTX(ctx)				\
{									\
	__asm("vmovdqu64 %0, %%zmm0" :: "m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %0, %%zmm1" :: "m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %0, %%zmm2" :: "m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %0, %%zmm3" :: "m" ((ctx)->avx512[3]));	\
}

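/* Store the lane accumulators in %zmm0-%zmm3 back to the context. */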
#define	FLETCHER_4_AVX512_SAVE_CTX(ctx)					\
{									\
	__asm("vmovdqu64 %%zmm0, %0" : "=m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %%zmm1, %0" : "=m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %%zmm2, %0" : "=m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %%zmm3, %0" : "=m" ((ctx)->avx512[3]));	\
}

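/*
 * Native (host-endian) compute loop: each iteration zero-extends eight
 * 32-bit input words into the 64-bit lanes of %zmm4 and updates the four
 * running sums.
 */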
90 static void
fletcher_4_avx512f_native(fletcher_4_ctx_t * ctx,const void * buf,size_t size)91 fletcher_4_avx512f_native(fletcher_4_ctx_t *ctx, const void *buf, size_t size)
92 {
93 	const uint32_t *ip = buf;
94 	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
95 
96 	FLETCHER_4_AVX512_RESTORE_CTX(ctx);
97 
98 	do {
99 		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
100 		__asm("vpaddq %zmm4, %zmm0, %zmm0");
101 		__asm("vpaddq %zmm0, %zmm1, %zmm1");
102 		__asm("vpaddq %zmm1, %zmm2, %zmm2");
103 		__asm("vpaddq %zmm2, %zmm3, %zmm3");
104 	} while ((ip += 8) < ipend);
105 
106 	FLETCHER_4_AVX512_SAVE_CTX(ctx);
107 }
108 
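/*
 * Byteswap variant using only AVX-512F instructions: each 32-bit word is
 * byte-swapped within its 64-bit lane with shifts and masks before being
 * added, since vpshufb on zmm registers requires AVX-512BW.
 */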
static void
fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    size_t size)
{
	static const uint64_t byteswap_mask = 0xFFULL;
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	__asm("vpbroadcastq %0, %%zmm8" :: "r" (byteswap_mask));
	__asm("vpsllq $8, %zmm8, %zmm9");
	__asm("vpsllq $16, %zmm8, %zmm10");
	__asm("vpsllq $24, %zmm8, %zmm11");

	do {
		__asm("vpmovzxdq %0, %%zmm5"::"m" (*ip));

		__asm("vpsrlq $24, %zmm5, %zmm6");
		__asm("vpandd %zmm8, %zmm6, %zmm6");
		__asm("vpsrlq $8, %zmm5, %zmm7");
		__asm("vpandd %zmm9, %zmm7, %zmm7");
		__asm("vpord %zmm6, %zmm7, %zmm4");
		__asm("vpsllq $8, %zmm5, %zmm6");
		__asm("vpandd %zmm10, %zmm6, %zmm6");
		__asm("vpord %zmm6, %zmm4, %zmm4");
		__asm("vpsllq $24, %zmm5, %zmm5");
		__asm("vpandd %zmm11, %zmm5, %zmm5");
		__asm("vpord %zmm5, %zmm4, %zmm4");

		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	} while ((ip += 8) < ipend);

	FLETCHER_4_AVX512_SAVE_CTX(ctx)
}

static boolean_t
fletcher_4_avx512f_valid(void)
{
	return (kfpu_allowed() && zfs_avx512f_available());
}

const fletcher_4_ops_t fletcher_4_avx512f_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512f_byteswap,
	.valid = fletcher_4_avx512f_valid,
	.uses_fpu_native = B_TRUE,
	.uses_fpu_byteswap = B_TRUE,
	.name = "avx512f"
};

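/*
 * Byteswap variant for CPUs with AVX-512BW: a single vpshufb against the
 * shuffle mask below swaps the bytes of each 32-bit word after it has been
 * zero-extended into its 64-bit lane.
 */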
static void
fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    size_t size)
{
	static const zfs_fletcher_avx512_t mask = {
		.v = {
			0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
			0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
			0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
			0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B
		}
	};
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_AVX512_RESTORE_CTX(ctx);

	__asm("vmovdqu64 %0, %%zmm5" :: "m" (mask));

	do {
		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));

		__asm("vpshufb %zmm5, %zmm4, %zmm4");

		__asm("vpaddq %zmm4, %zmm0, %zmm0");
		__asm("vpaddq %zmm0, %zmm1, %zmm1");
		__asm("vpaddq %zmm1, %zmm2, %zmm2");
		__asm("vpaddq %zmm2, %zmm3, %zmm3");
	} while ((ip += 8) < ipend);

	FLETCHER_4_AVX512_SAVE_CTX(ctx)
}

static boolean_t
fletcher_4_avx512bw_valid(void)
{
	return (kfpu_allowed() && fletcher_4_avx512f_valid() &&
	    zfs_avx512bw_available());
}


const fletcher_4_ops_t fletcher_4_avx512bw_ops = {
	.init_native = fletcher_4_avx512f_init,
	.fini_native = fletcher_4_avx512f_fini,
	.compute_native = fletcher_4_avx512f_native,
	.init_byteswap = fletcher_4_avx512f_init,
	.fini_byteswap = fletcher_4_avx512f_fini,
	.compute_byteswap = fletcher_4_avx512bw_byteswap,
	.valid = fletcher_4_avx512bw_valid,
	.uses_fpu_native = B_TRUE,
	.uses_fpu_byteswap = B_TRUE,
	.name = "avx512bw"
};

#endif /* __amd64 */