2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63 unsigned int ae, unsigned int image_num)
65 struct icp_qat_uclo_aedata *ae_data;
66 struct icp_qat_uclo_encapme *encap_image;
67 struct icp_qat_uclo_page *page = NULL;
68 struct icp_qat_uclo_aeslice *ae_slice = NULL;
70 ae_data = &obj_handle->ae_data[ae];
71 encap_image = &obj_handle->ae_uimage[image_num];
72 ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73 ae_slice->encap_image = encap_image;
75 if (encap_image->img_ptr) {
76 ae_slice->ctx_mask_assigned =
77 encap_image->img_ptr->ctx_assigned;
78 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
80 ae_slice->ctx_mask_assigned = 0;
82 ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
83 if (!ae_slice->region)
85 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
88 page = ae_slice->page;
89 page->encap_page = encap_image->page;
90 ae_slice->page->region = ae_slice->region;
94 kfree(ae_slice->region);
95 ae_slice->region = NULL;
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (i = 0; i < ae_data->slice_num; i++) {
109 kfree(ae_data->ae_slices[i].region);
110 ae_data->ae_slices[i].region = NULL;
111 kfree(ae_data->ae_slices[i].page);
112 ae_data->ae_slices[i].page = NULL;
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 unsigned int str_offset)
120 if ((!str_table->table_len) || (str_offset > str_table->table_len))
122 return (char *)(((unsigned long)(str_table->strings)) + str_offset);
125 static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
127 int maj = hdr->maj_ver & 0xff;
128 int min = hdr->min_ver & 0xff;
130 if (hdr->file_id != ICP_QAT_UOF_FID) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
/*
 * Copy @num_in_bytes from @val into device SRAM starting at @addr,
 * one 32-bit word at a time.  Caller guarantees 4-byte granularity
 * (qat_uclo_wr_mimage rounds the size up with ALIGN()).
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
/*
 * Copy @num_in_bytes from @val into AE @ae's umem starting at byte
 * address @addr, one 32-bit word per uword write.  Caller guarantees
 * 4-byte granularity.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
176 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
178 struct icp_qat_uof_batch_init
181 struct icp_qat_uof_batch_init *umem_init;
183 if (!umem_init_header)
185 umem_init = umem_init_header->next;
187 unsigned int addr, *value, size;
190 addr = umem_init->addr;
191 value = umem_init->value;
192 size = umem_init->size;
193 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
194 umem_init = umem_init->next;
199 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
200 struct icp_qat_uof_batch_init **base)
202 struct icp_qat_uof_batch_init *umem_init;
206 struct icp_qat_uof_batch_init *pre;
209 umem_init = umem_init->next;
215 static int qat_uclo_parse_num(char *str, unsigned int *num)
218 unsigned long ae = 0;
221 strncpy(buf, str, 15);
222 for (i = 0; i < 16; i++) {
223 if (!isdigit(buf[i])) {
228 if ((kstrtoul(buf, 10, &ae)))
231 *num = (unsigned int)ae;
235 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
236 struct icp_qat_uof_initmem *init_mem,
237 unsigned int size_range, unsigned int *ae)
239 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
242 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
243 pr_err("QAT: initmem is out of range");
246 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
247 pr_err("QAT: Memory scope for init_mem error\n");
250 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
252 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
255 if (qat_uclo_parse_num(str, ae)) {
256 pr_err("QAT: Parse num for AE number failed\n");
259 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
260 pr_err("QAT: ae %d out of range\n", *ae);
266 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
267 *handle, struct icp_qat_uof_initmem
268 *init_mem, unsigned int ae,
269 struct icp_qat_uof_batch_init
272 struct icp_qat_uof_batch_init *init_header, *tail;
273 struct icp_qat_uof_batch_init *mem_init, *tail_old;
274 struct icp_qat_uof_memvar_attr *mem_val_attr;
275 unsigned int i, flag = 0;
278 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
279 sizeof(struct icp_qat_uof_initmem));
281 init_header = *init_tab_base;
283 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
286 init_header->size = 1;
287 *init_tab_base = init_header;
290 tail_old = init_header;
291 while (tail_old->next)
292 tail_old = tail_old->next;
294 for (i = 0; i < init_mem->val_attr_num; i++) {
295 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
299 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
300 mem_init->value = &mem_val_attr->value;
302 mem_init->next = NULL;
303 tail->next = mem_init;
305 init_header->size += qat_hal_get_ins_num();
311 mem_init = tail_old->next;
316 kfree(*init_tab_base);
320 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
321 struct icp_qat_uof_initmem *init_mem)
323 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
326 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
327 ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
329 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
330 &obj_handle->lm_init_tab[ae]))
335 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
336 struct icp_qat_uof_initmem *init_mem)
338 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
339 unsigned int ae, ustore_size, uaddr, i;
341 ustore_size = obj_handle->ustore_phy_size;
342 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
344 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
345 &obj_handle->umem_init_tab[ae]))
347 /* set the highest ustore address referenced */
348 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
349 for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
350 if (obj_handle->ae_data[ae].ae_slices[i].
351 encap_image->uwords_num < uaddr)
352 obj_handle->ae_data[ae].ae_slices[i].
353 encap_image->uwords_num = uaddr;
358 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
359 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
360 struct icp_qat_uof_initmem *init_mem)
362 switch (init_mem->region) {
363 case ICP_QAT_UOF_LMEM_REGION:
364 if (qat_uclo_init_lmem_seg(handle, init_mem))
367 case ICP_QAT_UOF_UMEM_REGION:
368 if (qat_uclo_init_umem_seg(handle, init_mem))
372 pr_err("QAT: initmem region error. region type=0x%x\n",
379 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
380 struct icp_qat_uclo_encapme *image)
383 struct icp_qat_uclo_encap_page *page;
384 struct icp_qat_uof_image *uof_image;
386 unsigned int ustore_size;
387 unsigned int patt_pos;
388 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
391 uof_image = image->img_ptr;
392 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
396 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
397 memcpy(&fill_data[i], &uof_image->fill_pattern,
401 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
402 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
404 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
405 patt_pos = page->beg_addr_p + page->micro_words_num;
407 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
408 page->beg_addr_p, &fill_data[0]);
409 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
410 ustore_size - patt_pos + 1,
411 &fill_data[page->beg_addr_p]);
417 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
420 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
421 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
423 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
424 if (initmem->num_in_bytes) {
425 if (qat_uclo_init_ae_memory(handle, initmem))
428 initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
429 (unsigned long)initmem +
430 sizeof(struct icp_qat_uof_initmem)) +
431 (sizeof(struct icp_qat_uof_memvar_attr) *
432 initmem->val_attr_num));
434 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
435 if (qat_hal_batch_wr_lm(handle, ae,
436 obj_handle->lm_init_tab[ae])) {
437 pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
440 qat_uclo_cleanup_batch_init_list(handle,
441 &obj_handle->lm_init_tab[ae]);
442 qat_uclo_batch_wr_umem(handle, ae,
443 obj_handle->umem_init_tab[ae]);
444 qat_uclo_cleanup_batch_init_list(handle,
451 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
452 char *chunk_id, void *cur)
455 struct icp_qat_uof_chunkhdr *chunk_hdr =
456 (struct icp_qat_uof_chunkhdr *)
457 ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
459 for (i = 0; i < obj_hdr->num_chunks; i++) {
460 if ((cur < (void *)&chunk_hdr[i]) &&
461 !strncmp(chunk_hdr[i].chunk_id, chunk_id,
462 ICP_QAT_UOF_OBJID_LEN)) {
463 return &chunk_hdr[i];
/*
 * Fold one byte @ch into the running CRC in @reg using the CCITT
 * polynomial 0x1021 (MSB-first) and return the updated 16-bit CRC.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xffff;
}
/*
 * Compute the CRC-CCITT checksum over @num bytes at @ptr; a NULL
 * pointer yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
495 static struct icp_qat_uclo_objhdr *
496 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
499 struct icp_qat_uof_filechunkhdr *file_chunk;
500 struct icp_qat_uclo_objhdr *obj_hdr;
504 file_chunk = (struct icp_qat_uof_filechunkhdr *)
505 (buf + sizeof(struct icp_qat_uof_filehdr));
506 for (i = 0; i < file_hdr->num_chunks; i++) {
507 if (!strncmp(file_chunk->chunk_id, chunk_id,
508 ICP_QAT_UOF_OBJID_LEN)) {
509 chunk = buf + file_chunk->offset;
510 if (file_chunk->checksum != qat_uclo_calc_str_checksum(
511 chunk, file_chunk->size))
513 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
516 obj_hdr->file_buff = chunk;
517 obj_hdr->checksum = file_chunk->checksum;
518 obj_hdr->size = file_chunk->size;
527 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
528 struct icp_qat_uof_image *image)
530 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
531 struct icp_qat_uof_objtable *neigh_reg_tab;
532 struct icp_qat_uof_code_page *code_page;
534 code_page = (struct icp_qat_uof_code_page *)
535 ((char *)image + sizeof(struct icp_qat_uof_image));
536 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
537 code_page->uc_var_tab_offset);
538 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
539 code_page->imp_var_tab_offset);
540 imp_expr_tab = (struct icp_qat_uof_objtable *)
541 (encap_uof_obj->beg_uof +
542 code_page->imp_expr_tab_offset);
543 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
544 imp_expr_tab->entry_num) {
545 pr_err("QAT: UOF can't contain imported variable to be parsed");
548 neigh_reg_tab = (struct icp_qat_uof_objtable *)
549 (encap_uof_obj->beg_uof +
550 code_page->neigh_reg_tab_offset);
551 if (neigh_reg_tab->entry_num) {
552 pr_err("QAT: UOF can't contain shared control store feature");
555 if (image->numpages > 1) {
556 pr_err("QAT: UOF can't contain multiple pages");
559 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
560 pr_err("QAT: UOF can't use shared control store feature");
563 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
564 pr_err("QAT: UOF can't use reloadable feature");
570 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
572 struct icp_qat_uof_image *img,
573 struct icp_qat_uclo_encap_page *page)
575 struct icp_qat_uof_code_page *code_page;
576 struct icp_qat_uof_code_area *code_area;
577 struct icp_qat_uof_objtable *uword_block_tab;
578 struct icp_qat_uof_uword_block *uwblock;
581 code_page = (struct icp_qat_uof_code_page *)
582 ((char *)img + sizeof(struct icp_qat_uof_image));
583 page->def_page = code_page->def_page;
584 page->page_region = code_page->page_region;
585 page->beg_addr_v = code_page->beg_addr_v;
586 page->beg_addr_p = code_page->beg_addr_p;
587 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
588 code_page->code_area_offset);
589 page->micro_words_num = code_area->micro_words_num;
590 uword_block_tab = (struct icp_qat_uof_objtable *)
591 (encap_uof_obj->beg_uof +
592 code_area->uword_block_tab);
593 page->uwblock_num = uword_block_tab->entry_num;
594 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
595 sizeof(struct icp_qat_uof_objtable));
596 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
597 for (i = 0; i < uword_block_tab->entry_num; i++)
598 page->uwblock[i].micro_words =
599 (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
602 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
603 struct icp_qat_uclo_encapme *ae_uimage,
607 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
608 struct icp_qat_uof_image *image;
609 struct icp_qat_uof_objtable *ae_regtab;
610 struct icp_qat_uof_objtable *init_reg_sym_tab;
611 struct icp_qat_uof_objtable *sbreak_tab;
612 struct icp_qat_uof_encap_obj *encap_uof_obj =
613 &obj_handle->encap_uof_obj;
615 for (j = 0; j < max_image; j++) {
616 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
617 ICP_QAT_UOF_IMAG, chunk_hdr);
620 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
622 ae_regtab = (struct icp_qat_uof_objtable *)
623 (image->reg_tab_offset +
624 obj_handle->obj_hdr->file_buff);
625 ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
626 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
627 (((char *)ae_regtab) +
628 sizeof(struct icp_qat_uof_objtable));
629 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
630 (image->init_reg_sym_tab +
631 obj_handle->obj_hdr->file_buff);
632 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
633 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
634 (((char *)init_reg_sym_tab) +
635 sizeof(struct icp_qat_uof_objtable));
636 sbreak_tab = (struct icp_qat_uof_objtable *)
637 (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
638 ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
639 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
640 (((char *)sbreak_tab) +
641 sizeof(struct icp_qat_uof_objtable));
642 ae_uimage[j].img_ptr = image;
643 if (qat_uclo_check_image_compat(encap_uof_obj, image))
646 kzalloc(sizeof(struct icp_qat_uclo_encap_page),
648 if (!ae_uimage[j].page)
650 qat_uclo_map_image_page(encap_uof_obj, image,
655 for (i = 0; i < j; i++)
656 kfree(ae_uimage[i].page);
660 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
664 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
666 for (ae = 0; ae <= max_ae; ae++) {
668 (unsigned long *)&handle->hal_handle->ae_mask))
670 for (i = 0; i < obj_handle->uimage_num; i++) {
671 if (!test_bit(ae, (unsigned long *)
672 &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
675 if (qat_uclo_init_ae_data(obj_handle, ae, i))
680 pr_err("QAT: uimage uses AE not set");
686 static struct icp_qat_uof_strtable *
687 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
688 char *tab_name, struct icp_qat_uof_strtable *str_table)
690 struct icp_qat_uof_chunkhdr *chunk_hdr;
692 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
693 obj_hdr->file_buff, tab_name, NULL);
697 memcpy(&str_table->table_len, obj_hdr->file_buff +
698 chunk_hdr->offset, sizeof(str_table->table_len));
699 hdr_size = (char *)&str_table->strings - (char *)str_table;
700 str_table->strings = (unsigned long)obj_hdr->file_buff +
701 chunk_hdr->offset + hdr_size;
708 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
709 struct icp_qat_uclo_init_mem_table *init_mem_tab)
711 struct icp_qat_uof_chunkhdr *chunk_hdr;
713 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
714 ICP_QAT_UOF_IMEM, NULL);
716 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
717 chunk_hdr->offset, sizeof(unsigned int));
718 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
719 (encap_uof_obj->beg_uof + chunk_hdr->offset +
720 sizeof(unsigned int));
724 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
726 unsigned int maj_ver, prod_type = obj_handle->prod_type;
728 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
729 pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
730 obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
733 maj_ver = obj_handle->prod_rev & 0xff;
734 if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
735 (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
736 pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
742 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
743 unsigned char ae, unsigned char ctx_mask,
744 enum icp_qat_uof_regtype reg_type,
745 unsigned short reg_addr, unsigned int value)
753 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
764 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
771 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
774 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
776 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
782 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
784 struct icp_qat_uclo_encapme *encap_ae)
787 unsigned char ctx_mask;
788 struct icp_qat_uof_init_regsym *init_regsym;
790 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
791 ICP_QAT_UCLO_MAX_CTX)
796 for (i = 0; i < encap_ae->init_regsym_num; i++) {
797 unsigned int exp_res;
799 init_regsym = &encap_ae->init_regsym[i];
800 exp_res = init_regsym->value;
801 switch (init_regsym->init_type) {
802 case ICP_QAT_UOF_INIT_REG:
803 qat_uclo_init_reg(handle, ae, ctx_mask,
804 (enum icp_qat_uof_regtype)
805 init_regsym->reg_type,
806 (unsigned short)init_regsym->reg_addr,
809 case ICP_QAT_UOF_INIT_REG_CTX:
810 /* check if ctx is appropriate for the ctxMode */
811 if (!((1 << init_regsym->ctx) & ctx_mask)) {
812 pr_err("QAT: invalid ctx num = 0x%x\n",
816 qat_uclo_init_reg(handle, ae,
818 (1 << init_regsym->ctx),
819 (enum icp_qat_uof_regtype)
820 init_regsym->reg_type,
821 (unsigned short)init_regsym->reg_addr,
824 case ICP_QAT_UOF_INIT_EXPR:
825 pr_err("QAT: INIT_EXPR feature not supported\n");
827 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
828 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
837 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
839 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
842 if (obj_handle->global_inited)
844 if (obj_handle->init_mem_tab.entry_num) {
845 if (qat_uclo_init_memory(handle)) {
846 pr_err("QAT: initialize memory failed\n");
850 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
851 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
852 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
854 if (qat_uclo_init_reg_sym(handle, ae,
855 obj_handle->ae_data[ae].
856 ae_slices[s].encap_image))
860 obj_handle->global_inited = 1;
864 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
866 unsigned char ae, nn_mode, s;
867 struct icp_qat_uof_image *uof_image;
868 struct icp_qat_uclo_aedata *ae_data;
869 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
871 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
873 (unsigned long *)&handle->hal_handle->ae_mask))
875 ae_data = &obj_handle->ae_data[ae];
876 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
877 ICP_QAT_UCLO_MAX_CTX); s++) {
878 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
880 uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
881 if (qat_hal_set_ae_ctx_mode(handle, ae,
882 (char)ICP_QAT_CTX_MODE
883 (uof_image->ae_mode))) {
884 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
887 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
888 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
889 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
892 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
893 (char)ICP_QAT_LOC_MEM0_MODE
894 (uof_image->ae_mode))) {
895 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
898 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
899 (char)ICP_QAT_LOC_MEM1_MODE
900 (uof_image->ae_mode))) {
901 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
909 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
911 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
912 struct icp_qat_uclo_encapme *image;
915 for (a = 0; a < obj_handle->uimage_num; a++) {
916 image = &obj_handle->ae_uimage[a];
917 image->uwords_num = image->page->beg_addr_p +
918 image->page->micro_words_num;
922 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
924 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
927 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
929 if (!obj_handle->uword_buf)
931 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
932 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
933 obj_handle->obj_hdr->file_buff;
934 obj_handle->uword_in_bytes = 6;
935 obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
936 obj_handle->prod_rev = PID_MAJOR_REV |
937 (PID_MINOR_REV & handle->hal_handle->revision_id);
938 if (qat_uclo_check_uof_compat(obj_handle)) {
939 pr_err("QAT: UOF incompatible\n");
942 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
943 if (!obj_handle->obj_hdr->file_buff ||
944 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
945 &obj_handle->str_table)) {
946 pr_err("QAT: UOF doesn't have effective images\n");
949 obj_handle->uimage_num =
950 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
951 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
952 if (!obj_handle->uimage_num)
954 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
955 pr_err("QAT: Bad object\n");
956 goto out_check_uof_aemask_err;
958 qat_uclo_init_uword_num(handle);
959 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
960 &obj_handle->init_mem_tab);
961 if (qat_uclo_set_ae_mode(handle))
962 goto out_check_uof_aemask_err;
964 out_check_uof_aemask_err:
965 for (ae = 0; ae < obj_handle->uimage_num; ae++)
966 kfree(obj_handle->ae_uimage[ae].page);
968 kfree(obj_handle->uword_buf);
/*
 * Write an MMP/microcode image of @mem_size bytes at @addr_ptr into
 * device SRAM at offset 0, rounding the size up to a 4-byte multiple.
 */
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
			void *addr_ptr, int mem_size)
{
	qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
}
978 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
979 void *addr_ptr, int mem_size)
981 struct icp_qat_uof_filehdr *filehdr;
982 struct icp_qat_uclo_objhandle *objhdl;
984 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
985 (sizeof(handle->hal_handle->ae_mask) * 8));
987 if (!handle || !addr_ptr || mem_size < 24)
989 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
992 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
993 if (!objhdl->obj_buf)
995 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
996 if (qat_uclo_check_format(filehdr))
998 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1000 if (!objhdl->obj_hdr) {
1001 pr_err("QAT: object file chunk is null\n");
1002 goto out_objhdr_err;
1004 handle->obj_handle = objhdl;
1005 if (qat_uclo_parse_uof_obj(handle))
1006 goto out_overlay_obj_err;
1009 out_overlay_obj_err:
1010 handle->obj_handle = NULL;
1011 kfree(objhdl->obj_hdr);
1013 kfree(objhdl->obj_buf);
1019 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1021 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1027 kfree(obj_handle->uword_buf);
1028 for (a = 0; a < obj_handle->uimage_num; a++)
1029 kfree(obj_handle->ae_uimage[a].page);
1031 for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1032 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1034 kfree(obj_handle->obj_hdr);
1035 kfree(obj_handle->obj_buf);
1037 handle->obj_handle = NULL;
1040 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1041 struct icp_qat_uclo_encap_page *encap_page,
1042 uint64_t *uword, unsigned int addr_p,
1043 unsigned int raddr, uint64_t fill)
1052 for (i = 0; i < encap_page->uwblock_num; i++) {
1053 if (raddr >= encap_page->uwblock[i].start_addr &&
1054 raddr <= encap_page->uwblock[i].start_addr +
1055 encap_page->uwblock[i].words_num - 1) {
1056 raddr -= encap_page->uwblock[i].start_addr;
1057 raddr *= obj_handle->uword_in_bytes;
1058 memcpy(&uwrd, (void *)(((unsigned long)
1059 encap_page->uwblock[i].micro_words) + raddr),
1060 obj_handle->uword_in_bytes);
1061 uwrd = uwrd & 0xbffffffffffull;
1065 if (*uword == INVLD_UWORD)
1069 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1070 struct icp_qat_uclo_encap_page
1071 *encap_page, unsigned int ae)
1073 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1074 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1077 /* load the page starting at appropriate ustore address */
1078 /* get fill-pattern from an image -- they are all the same */
1079 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1081 uw_physical_addr = encap_page->beg_addr_p;
1082 uw_relative_addr = 0;
1083 words_num = encap_page->micro_words_num;
1085 if (words_num < UWORD_CPYBUF_SIZE)
1088 cpylen = UWORD_CPYBUF_SIZE;
1090 /* load the buffer */
1091 for (i = 0; i < cpylen; i++)
1092 qat_uclo_fill_uwords(obj_handle, encap_page,
1093 &obj_handle->uword_buf[i],
1094 uw_physical_addr + i,
1095 uw_relative_addr + i, fill_pat);
1097 /* copy the buffer to ustore */
1098 qat_hal_wr_uwords(handle, (unsigned char)ae,
1099 uw_physical_addr, cpylen,
1100 obj_handle->uword_buf);
1102 uw_physical_addr += cpylen;
1103 uw_relative_addr += cpylen;
1104 words_num -= cpylen;
1108 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1109 struct icp_qat_uof_image *image)
1111 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1112 unsigned int ctx_mask, s;
1113 struct icp_qat_uclo_page *page;
1117 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1121 /* load the default page and set assigned CTX PC
1122 * to the entrypoint address */
1123 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
1124 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1126 /* find the slice to which this image is assigned */
1127 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1128 if (image->ctx_assigned & obj_handle->ae_data[ae].
1129 ae_slices[s].ctx_mask_assigned)
1132 if (s >= obj_handle->ae_data[ae].slice_num)
1134 page = obj_handle->ae_data[ae].ae_slices[s].page;
1135 if (!page->encap_page->def_page)
1137 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1139 page = obj_handle->ae_data[ae].ae_slices[s].page;
1140 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1141 obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1142 (ctx_mask & (1 << ctx)) ? page : NULL;
1143 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1144 image->ctx_assigned);
1145 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1146 image->entry_address);
1150 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1152 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1155 if (qat_uclo_init_globals(handle))
1157 for (i = 0; i < obj_handle->uimage_num; i++) {
1158 if (!obj_handle->ae_uimage[i].img_ptr)
1160 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1162 qat_uclo_wr_uimage_page(handle,
1163 obj_handle->ae_uimage[i].img_ptr);