/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "head.h"
#include "atom.h"
#include "core.h"
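
/* Program the head's output resource (OR) control word through the core
 * channel: enable bit, OR pixel depth, and nvsync/nhsync polarity flags.
 */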
static void
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		/*XXX: This is a dirty hack until OR depth handling is
		 *     improved later for deep colour etc.
		 */
		switch (asyh->or.depth) {
		case 6: asyh->or.depth = 5; break;
		case 5: asyh->or.depth = 4; break;
		case 2: asyh->or.depth = 1; break;
		case 0:	asyh->or.depth = 4; break;
		default:
			WARN_ON(1);
			break;
		}

		evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000001 |
			       asyh->or.depth << 4 |
			       asyh->or.nvsync << 3 |
			       asyh->or.nhsync << 2);
		evo_kick(push, core);
	}
}
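
/* Procamp control: enable bit plus the saturation sine/cosine coefficients,
 * packed into a single method.
 */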
static void
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
		evo_data(push, 0x80000000 |
			       asyh->procamp.sat.sin << 16 |
			       asyh->procamp.sat.cos << 4);
		evo_kick(push, core);
	}
}
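
/* Dithering control for this head: mode, bit depth and enable flag. */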
void
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1);
		evo_data(push, asyh->dither.mode << 8 |
			       asyh->dither.bits << 4 |
			       asyh->dither.enable);
		evo_kick(push, core);
	}
}
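
/* Disable the hardware cursor and clear its context DMA handle. */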
void
headc37d_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x209c + head->base.index * 0x400, 1);
		evo_data(push, 0x000000cf);
		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
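
/* Enable the hardware cursor: control word (layout and pixel format),
 * context DMA handle, and the 256-byte-aligned surface offset.
 */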
void
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		evo_mthd(push, 0x209c + head->base.index * 0x400, 2);
		evo_data(push, 0x80000000 |
			       asyh->curs.layout << 8 |
			       asyh->curs.format << 0);
		evo_data(push, 0x000072ff);
		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
		evo_data(push, asyh->curs.handle);
		evo_mthd(push, 0x2090 + head->base.index * 0x400, 1);
		evo_data(push, asyh->curs.offset >> 8);
		evo_kick(push, core);
	}
}
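
/* The cursor format is taken directly from the window image format. */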
int
headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
		     struct nv50_head_atom *asyh)
{
	asyh->curs.format = asyw->image.format;
	return 0;
}
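
/* Detach the output LUT context DMA from this head. */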
static void
headc37d_olut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
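
/* Point the head at its output LUT: control word (output mode, range, size),
 * 256-byte-aligned buffer offset, and context DMA handle.
 */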
static void
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3);
		evo_data(push, asyh->olut.output_mode << 8 |
			       asyh->olut.range << 4 |
			       asyh->olut.size);
		evo_data(push, asyh->olut.offset >> 8);
		evo_data(push, asyh->olut.handle);
		evo_kick(push, core);
	}
}
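
/* Validate the requested output LUT size (only 256 or 1024 entries are
 * accepted) and fill in the LUT state consumed by headc37d_olut_set().
 */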
static bool
headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
	if (size != 256 && size != 1024)
		return false;

	asyh->olut.mode = 2;
	asyh->olut.size = size == 1024 ? 2 : 0;
	asyh->olut.range = 0;
	asyh->olut.output_mode = 1;
	asyh->olut.load = head907d_olut_load;
	return true;
}
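
/* Program raster timings, interlace flag and pixel clock for this head,
 * plus a fixed usage-bounds value (see the XXX note below).
 */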
static void
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 13))) {
		evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
		evo_data(push, (m->v.active  << 16) | m->h.active );
		evo_data(push, (m->v.synce   << 16) | m->h.synce  );
		evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
		evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
		evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
		evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
		evo_data(push, m->interlace);
		evo_data(push, m->clock * 1000);
		evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
		evo_data(push, m->clock * 1000);
		/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
		evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
		evo_data(push, 0x00000124);
		evo_kick(push, core);
	}
}
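
/* Set the viewport: input (source) size and output (scaled) size. */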
void
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		evo_mthd(push, 0x204c + (head->base.index * 0x400), 1);
		evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
		evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1);
		evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		evo_kick(push, core);
	}
}
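
/* Head function table for the C37D display core channel. */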
const struct nv50_head_func
headc37d = {
	.view = headc37d_view,
	.mode = headc37d_mode,
	.olut = headc37d_olut,
	.olut_size = 1024,
	.olut_set = headc37d_olut_set,
	.olut_clr = headc37d_olut_clr,
	.curs_layout = head917d_curs_layout,
	.curs_format = headc37d_curs_format,
	.curs_set = headc37d_curs_set,
	.curs_clr = headc37d_curs_clr,
	.dither = headc37d_dither,
	.procamp = headc37d_procamp,
	.or = headc37d_or,
};