File _service:obs_scm:libkae-1628763809.caed576.obscpio of Package libkae (project Eulaceura:Factory)
File: libkae-1628763809.caed576/0001-Don-t-redefine-gettid-if-glibc-provides-it.patch

From 04f80ac2c822543f130c30942560e4a3300f8481 Mon Sep 17 00:00:00 2001
From: lingsheng <lingsheng@huawei.com>
Date: Tue, 28 Jul 2020 09:41:05 +0800
Subject: [PATCH] Don't redefine gettid if glibc provides it

glibc 2.30+ include a definition for gettid() so that users don't have to
manually define gettid() with syscall(). Old code don't check this, this will
redefine gettid() on the latest versions of glibc, causing build fail.

Signed-off-by: lingsheng <lingsheng@huawei.com>
---
 KAE/utils/engine_utils.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/KAE/utils/engine_utils.h b/KAE/utils/engine_utils.h
index 160e449..12a587e 100644
--- a/KAE/utils/engine_utils.h
+++ b/KAE/utils/engine_utils.h
@@ -28,7 +28,15 @@
 #include <sys/types.h>
 #include <sys/syscall.h>
 
+#if __GLIBC_PREREQ(2, 30)
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <unistd.h>
+#else
 #define gettid() syscall(SYS_gettid)
+#endif
+
 #define PRINTPID \
     US_DEBUG("pid=%d, ptid=%lu, tid=%d", getpid(), pthread_self(), gettid())
 
-- 
2.23.0

File: libkae-1628763809.caed576/0002-fix-pthread_yield.patch

From f0c7130d9186dbb62367c0eb6c4d2ae6f78ef577 Mon Sep 17 00:00:00 2001
From: caodongxia <315816521@qq.com>
Date: Thu, 12 Aug 2021 11:49:36 +0800
Subject: [PATCH] pthread_yield is deprecated, use sched_yield instead

---
 KAE/utils/engine_utils.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/KAE/utils/engine_utils.h b/KAE/utils/engine_utils.h
index 12a587e..f0b69b2 100644
--- a/KAE/utils/engine_utils.h
+++ b/KAE/utils/engine_utils.h
@@ -129,7 +129,7 @@ static inline void kae_memcpy(void *src, const void *dst, int len)
 
 static inline void kae_pthread_yield()
 {
-    (void)pthread_yield(); //lint !e1055
+    (void)sched_yield(); //lint !e1055
 }
 
 int kae_create_thread(pthread_t *thread_id, const pthread_attr_t *attr,
-- 
2.27.0

File: libkae-1628763809.caed576/config_for_securerpm

name:libkae
sec_opt:-Wno-unused-result
fs_opt:

File: libkae-1628763809.caed576/kae.spec

%global debug_package %{nil}
Name: libkae
Summary: Huawei Kunpeng Accelerator Engine
Version: 1.2.10
Release: 5
License: Apache-2.0
Source: %{name}-%{version}.tar.gz
Vendor: Huawei Corporation
ExclusiveOS: linux
URL: https://support.huawei.com
BuildRoot: %{_tmppath}/%{name}-%{version}-root
Prefix: /usr/local/lib/engines-1.1
Conflicts: %{name} < %{version}-%{release}
Provides: %{name} = %{version}-%{release}
BuildRequires: libwd >= %{version} openssl-devel sed
Requires: openssl
ExclusiveArch: aarch64
Patch0001: 0001-Don-t-redefine-gettid-if-glibc-provides-it.patch
Patch0002: 0002-fix-pthread_yield.patch

%description
This package contains the Huawei Kunpeng Accelerator Engine

%prep
%autosetup -c -n %{name}-%{version} -p1

%build
cd KAE
chmod +x configure
./configure
make

%install
mkdir -p ${RPM_BUILD_ROOT}/usr/local/lib/engines-1.1
install -b -m755 KAE/libkae.so.%{version} ${RPM_BUILD_ROOT}/usr/local/lib/engines-1.1

%clean
rm -rf ${RPM_BUILD_ROOT}

%files
%defattr(755,root,root)
/usr/local/lib/engines-1.1/libkae.so.%{version}

%pre
if [ "$1" = "2" ] ; then    #2: update
    rm -rf $RPM_INSTALL_PREFIX/kae.so > /dev/null 2>&1 || true
    rm -rf $RPM_INSTALL_PREFIX/kae.so.0 > /dev/null 2>&1 || true
fi

%post
if [[ "$1" = "1" || "$1" = "2" ]] ; then    #1: install 2: update
    ln -sf $RPM_INSTALL_PREFIX/libkae.so.%{version} $RPM_INSTALL_PREFIX/kae.so
    ln -sf $RPM_INSTALL_PREFIX/libkae.so.%{version} $RPM_INSTALL_PREFIX/kae.so.0
fi
/sbin/ldconfig

%preun
if [ "$1" = "0" ] ; then    #0: uninstall
    rm -rf $RPM_INSTALL_PREFIX/kae.so > /dev/null 2>&1 || true
    rm -rf $RPM_INSTALL_PREFIX/kae.so.0 > /dev/null 2>&1 || true
    rm -f /var/log/kae.log > /dev/null 2>&1 || true
    rm -f /var/log/kae.log.old > /dev/null 2>&1 || true
fi

%postun
/sbin/ldconfig

%changelog
* Thu Aug 12 2021 caodongxia <caodongxia@huawei.com> 1.2.10-5
- Fix pthread_yield is deprecated

* Tue Jul 28 2020 lingsheng <lingsheng@huawei.com> 1.2.10-4
- Check glibc version to avoid redefine gettid()

* Sun Mar 15 2020 zhangtao <zhangtao221@huawei.com> 1.2.10-3
- Specify aarch64 compilation

* Tue Mar 03 2020 catastrowings <jianghuhao1994@163.com> 1.2.10-2
- openEuler init

* Tue Jan 07 2020 jinbinhua <jinbinhua@huawei.com> 1.2.7-1
- First Spec Version Include kunpeng_engine Code
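For context, both patches above modify KAE/utils/engine_utils.h, which appears in full inside the source tarball below. The snippet that follows is a minimal stand-alone sketch, not part of the package, illustrating the same feature-test pattern under the assumption of a glibc-based Linux toolchain: on glibc 2.30 or newer gettid() comes from <unistd.h> (with _GNU_SOURCE), older glibc falls back to syscall(SYS_gettid), and sched_yield() stands in for the deprecated pthread_yield() as in patch 0002. The file name gettid_demo.c is hypothetical.

/* gettid_demo.c - illustrative sketch only, not part of libkae.
 * Assumes a glibc-based Linux toolchain. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE                 /* exposes gettid() and syscall() prototypes */
#endif
#include <stdio.h>
#include <unistd.h>                 /* getpid(), syscall(); gettid() on glibc >= 2.30 */
#include <sched.h>                  /* sched_yield(), the replacement used by patch 0002 */
#include <sys/syscall.h>            /* SYS_gettid */
#include <features.h>               /* __GLIBC_PREREQ */

#if !__GLIBC_PREREQ(2, 30)
/* Older glibc: emulate gettid() the way engine_utils.h did before patch 0001. */
#define gettid() ((pid_t)syscall(SYS_gettid))
#endif

int main(void)
{
    printf("pid=%d tid=%d\n", (int)getpid(), (int)gettid());
    (void)sched_yield();            /* yield the CPU without the deprecated pthread_yield() */
    return 0;
}

Built with a plain "gcc gettid_demo.c", the same source should compile on glibc releases both older and newer than 2.30, which is the property the packaged patch restores for the engine itself.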
File: libkae-1628763809.caed576/libkae-1.2.10.tar.gz (tar archive; member files follow)

KAE/
KAE/wdmngr/

File: KAE/wdmngr/wd_alg_queue.c

/*
 * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved.
 *
 * Description: This file provides the wd queue management module
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wd_alg_queue.h"
#include "engine_log.h"

struct wd_queue* wd_new_queue(int algtype)
{
    struct wd_queue* queue = (struct wd_queue *)kae_malloc(sizeof(struct wd_queue));
    if (queue == NULL) {
        US_ERR("malloc failed");
        return NULL;
    }
    kae_memset(queue, 0, sizeof(struct wd_queue));

    switch (algtype) {
        case WCRYPTO_RSA:
            queue->capa.alg = "rsa";
            break;
        case WCRYPTO_DH:
            queue->capa.alg = "dh";
            break;
        case WCRYPTO_CIPHER:
            queue->capa.alg = "cipher";
            break;
        case WCRYPTO_DIGEST:
            queue->capa.alg = "digest";
            break;
        case WCRYPTO_COMP:
        case WCRYPTO_EC:
        case WCRYPTO_RNG:
        default:
            US_WARN("not support algtype:%d", algtype);
            kae_free(queue);
            queue = NULL;
            return NULL;
    }

    int ret = wd_request_queue(queue);
    if (ret) {
        US_ERR("request wd queue fail!errno:%d", ret);
        kae_free(queue);
        queue = NULL;
        return NULL;
    }

    return queue;
}

void wd_free_queue(struct wd_queue* queue)
{
    if (queue != NULL) {
        wd_release_queue(queue);
        kae_free(queue);
        queue = NULL;
    }
}

File: KAE/wdmngr/wd_queue_memory.h

/*
 * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved.
 *
 * Description: This file provides the interface for wd_queue_memory.c
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __WD_QUEUE_MEMORY_H #define __WD_QUEUE_MEMORY_H #include <semaphore.h> #include "wd.h" #include "wd_alg_queue.h" #include "engine_utils.h" #define KAE_QUEUE_POOL_MAX_SIZE 512 #define CHECK_QUEUE_TIME_SECONDS 5 // seconds /* * once use 3 block for ctx&pubkey*prikey. * the max Concurrent num = HPRE_BLOCK_NUM/3 * when use 4096bit rsa. block use max is 3576. * 3576 = sizeof(ctx)(248)+ pubkey_size(1024) + prikey_size(2304) * that means max block used is 2304. set 4096 for reserve */ #define RSA_BLOCK_NUM 16 #define RSA_BLOCK_SIZE 4096 #define DH_BLOCK_NUM 16 #define DH_BLOCK_SIZE 4096 #define CIPHER_BLOCK_NUM 4 #define CIPHER_BLOCK_SIZE (272*1024) #define DIGEST_BLOCK_NUM 4 #define DIGEST_BLOCK_SIZE (512 * 1024) typedef void (*release_engine_ctx_cb)(void* engine_ctx); typedef struct KAE_QUEUE_DATA_NODE { struct wd_queue *kae_wd_queue; struct wd_queue_mempool *kae_queue_mem_pool; void *engine_ctx; } KAE_QUEUE_DATA_NODE_S; typedef struct KAE_QUEUE_POOL_NODE { // int using_flag; /* used:true,nouse:false */ struct kae_spinlock spinlock; time_t add_time; // int index; /* index of node,init:-1 */ KAE_QUEUE_DATA_NODE_S *node_data; // KAE_QUEUE_POOL_NODE_S *next; } KAE_QUEUE_POOL_NODE_S; typedef struct KAE_QUEUE_POOL_HEAD { // int init_flag; int pool_use_num; int algtype; /* alg type,just init at init pool */ pthread_mutex_t destroy_mutex; pthread_mutex_t kae_queue_mutex; struct KAE_QUEUE_POOL_HEAD *next; /* next pool */ KAE_QUEUE_POOL_NODE_S *kae_queue_pool; /* point to a attray */ } KAE_QUEUE_POOL_HEAD_S; struct wd_queue_mempool { struct wd_queue *q; void *base; unsigned int *bitmap; unsigned int block_size; unsigned int block_num; unsigned int mem_size; unsigned int block_align_size; unsigned int free_num; unsigned int fail_times; unsigned long long index; sem_t mempool_sem; int dev; }; struct wd_queue_mempool *wd_queue_mempool_create(struct wd_queue *q, unsigned int block_size, unsigned int block_num); void wd_queue_mempool_destroy(struct wd_queue_mempool *pool); void kae_wd_free_blk(void *pool, void *blk); void *kae_wd_alloc_blk(void *pool, size_t size); void *kae_dma_map(void *usr, void *va, size_t sz); void kae_dma_unmap(void *usr, void *va, void *dma, size_t sz); KAE_QUEUE_POOL_HEAD_S* kae_init_queue_pool (int algtype); KAE_QUEUE_DATA_NODE_S* kae_get_node_from_pool(KAE_QUEUE_POOL_HEAD_S* pool_head); int kae_put_node_to_pool (KAE_QUEUE_POOL_HEAD_S* pool_head, KAE_QUEUE_DATA_NODE_S* node_data); void kae_queue_pool_reset(KAE_QUEUE_POOL_HEAD_S* pool_head); void kae_queue_pool_destroy(KAE_QUEUE_POOL_HEAD_S* pool_head, release_engine_ctx_cb release_fn); void kae_queue_pool_check_and_release(KAE_QUEUE_POOL_HEAD_S* pool_head, release_engine_ctx_cb release_ectx_fn); #endif KAE/wdmngr/wd_queue_memory.c0000644060212406010010000004163313616500010013242 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine of wd queue memory management * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "wd_queue_memory.h" #include "engine_utils.h" #include "engine_log.h" #include "wd_bmm.h" #define MAXBLOCKSIZE 0x90000 #define MAXRSVMEM 0x400000 #define MAXBLOCKSIZE 0x90000 #define MAXRSVMEM 0x400000 const char *g_alg_type[] = { "rsa", "dh", "cipher", "digest", }; struct wd_queue_mempool *wd_queue_mempool_create(struct wd_queue *q, unsigned int block_size, unsigned int block_num) { void *addr = NULL; unsigned long rsv_mm_sz; struct wd_queue_mempool *pool = NULL; unsigned int bitmap_sz; const unsigned int BLOCKS_PER_BITMAP = 32; if (block_size > MAXBLOCKSIZE) { US_ERR("error! current blk size is beyond 576k"); return NULL; } rsv_mm_sz = (unsigned long)block_size * (unsigned long)block_num; if (rsv_mm_sz > (unsigned long)MAXRSVMEM) { US_ERR("error! current mem size is beyond 4M"); return NULL; } addr = wd_reserve_memory(q, rsv_mm_sz); if (addr == NULL) { US_ERR("reserve_memory fail!"); return NULL; } kae_memset(addr, 0, rsv_mm_sz); bitmap_sz = (block_num / BLOCKS_PER_BITMAP + 1) * sizeof(unsigned int); pool = (struct wd_queue_mempool *)kae_malloc(sizeof(struct wd_queue_mempool) + bitmap_sz); if (pool == NULL) { US_ERR("Alloc pool handle fail!"); return NULL; } kae_memset(pool, 0, sizeof(struct wd_queue_mempool) + bitmap_sz); pool->base = addr; sem_init(&pool->mempool_sem, 0, 1); pool->block_size = block_size; pool->block_num = block_num; pool->free_num = block_num; pool->bitmap = (unsigned int *) (pool + 1); pool->mem_size = rsv_mm_sz; pool->q = q; return pool; } struct wd_queue_mempool *create_alg_wd_queue_mempool(int algtype, struct wd_queue *q) { struct wd_queue_mempool *mempool = NULL; unsigned int block_size; unsigned int block_num; switch (algtype) { case WCRYPTO_RSA: block_size = RSA_BLOCK_SIZE; block_num = RSA_BLOCK_NUM; break; case WCRYPTO_DH: block_size = DH_BLOCK_SIZE; block_num = DH_BLOCK_NUM; break; case WCRYPTO_CIPHER: block_size = CIPHER_BLOCK_SIZE; block_num = CIPHER_BLOCK_NUM; break; case WCRYPTO_DIGEST: block_size = DIGEST_BLOCK_SIZE; block_num = DIGEST_BLOCK_NUM; break; case WCRYPTO_COMP: case WCRYPTO_EC: case WCRYPTO_RNG: default: US_WARN("create_alg_wd_queue_mempool not support algtype:%d", algtype); return NULL; } #ifdef NO_WD_BLK_POOL mempool = wd_queue_mempool_create(q, block_size, block_num); #else struct wd_blkpool_setup setup; kae_memset(&setup, 0, sizeof(setup)); setup.block_size = block_size; setup.block_num = block_num; setup.align_size = 64; // align with 64 mempool = (struct wd_queue_mempool *)wd_blkpool_create(q, &setup); #endif return mempool; } void wd_queue_mempool_destroy(struct wd_queue_mempool *pool) { #ifdef NO_WD_BLK_POOL kae_free(pool); #else wd_blkpool_destroy(pool); #endif return; } void *kae_dma_map(void *usr, void *va, size_t sz) { #ifdef NO_WD_BLK_POOL struct wd_queue_mempool *pool = (struct wd_queue_mempool *)usr; return wd_dma_map(pool->q, va, sz); #else return wd_blk_iova_map(usr, va); #endif } void kae_dma_unmap(void *usr, void *va, void *dma, size_t sz) { #ifdef NO_WD_BLK_POOL struct wd_queue_mempool *pool = (struct wd_queue_mempool *)usr; return wd_dma_unmap(pool->q, va, dma, sz); #else return 
wd_blk_iova_unmap(usr, dma, va); #endif } #ifdef NO_WD_BLK_POOL static void *wd_queue_pool_alloc_buf(struct wd_queue_mempool *pool) { __u64 i = 0; __u64 j = 0; (void) sem_wait(&pool->mempool_sem); __u32 *pbm = pool->bitmap; __u64 tmp = pool->index; for (; pool->index < pool->block_num; pool->index++) { i = (pool->index >> 5); j = (pool->index & (32 - 1)); if ((pbm[i] & ((__u32) 0x1 << j)) == 0) { pbm[i] |= ((__u32) 0x1 << j); tmp = pool->index; pool->index++; (void) sem_post(&pool->mempool_sem); return (void*)((char *) pool->base + (tmp * pool->block_size)); } } for (pool->index = 0; pool->index < tmp; pool->index++) { i = (pool->index >> 5); j = (pool->index & (32 - 1)); if ((pbm[i] & ((__u32) 0x1 << j)) == 0) { pbm[i] |= ((__u32) 0x1 << j); tmp = pool->index; pool->index++; (void) sem_post(&pool->mempool_sem); return (void*)((char *) pool->base + (tmp * pool->block_size)); } } (void) sem_post(&pool->mempool_sem); US_ERR("no reserve mem available!"); return NULL; } static void wd_queue_pool_free_buf(struct wd_queue_mempool *pool, void *pbuf) { __u32 *pbm = pool->bitmap; kae_memset(pbuf, 0, pool->block_size); __u64 offset = (__u64)((unsigned long) pbuf - (unsigned long) pool->base); offset = offset / pool->block_size; if (pool->block_num <= offset) { US_ERR("offset = %lld, virtual address err!", offset); return; } __u32 bit_mask = ~(0x1u << (offset & 31)); (void) sem_wait(&pool->mempool_sem); pbm[(offset >> 5)] &= bit_mask; (void) sem_post(&pool->mempool_sem); } #endif void *kae_wd_alloc_blk(void *pool, size_t size) { if (pool == NULL) { US_ERR("mem pool empty!"); return NULL; } #ifdef NO_WD_BLK_POOL struct wd_queue_mempool *mempool = (struct wd_queue_mempool *)pool; if (size > (size_t)mempool->block_size) { US_ERR("alloc size error, over one block size."); return NULL; } return wd_queue_pool_alloc_buf((struct wd_queue_mempool *)pool); #else return wd_alloc_blk(pool); #endif } void kae_wd_free_blk(void *pool, void *blk) { #ifdef NO_WD_BLK_POOL wd_queue_pool_free_buf((struct wd_queue_mempool *)pool, blk); #else wd_free_blk(pool, blk); #endif } KAE_QUEUE_POOL_HEAD_S* kae_init_queue_pool(int algtype) { KAE_QUEUE_POOL_HEAD_S *kae_pool = NULL; kae_pool = (KAE_QUEUE_POOL_HEAD_S *)kae_malloc(sizeof(KAE_QUEUE_POOL_HEAD_S)); if (kae_pool == NULL) { US_ERR("malloc pool head fail!"); return NULL; } /* fill data of head */ kae_pool->algtype = algtype; kae_pool->next = NULL; kae_pool->pool_use_num = 0; /* malloc a pool */ kae_pool->kae_queue_pool = (KAE_QUEUE_POOL_NODE_S *) kae_malloc(KAE_QUEUE_POOL_MAX_SIZE * sizeof(KAE_QUEUE_POOL_NODE_S)); if (kae_pool->kae_queue_pool == NULL) { US_ERR("malloc failed"); kae_free(kae_pool); return NULL; } kae_memset(kae_pool->kae_queue_pool, 0, KAE_QUEUE_POOL_MAX_SIZE * sizeof(KAE_QUEUE_POOL_NODE_S)); pthread_mutex_init(&kae_pool->kae_queue_mutex, NULL); pthread_mutex_init(&kae_pool->destroy_mutex, NULL); US_DEBUG("kae init %s queue success", g_alg_type[algtype]); return kae_pool; } static KAE_QUEUE_DATA_NODE_S* kae_get_queue_data_from_list(KAE_QUEUE_POOL_HEAD_S* pool_head) { int i = 0; KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; KAE_QUEUE_POOL_HEAD_S *temp_pool = pool_head; US_DEBUG("kae get queue node from pool start."); if ((pool_head->pool_use_num == 0) && (pool_head->next == NULL)) { return queue_data_node; } while (temp_pool != NULL) { for (i = 0; i < temp_pool->pool_use_num; i++) { if (temp_pool->kae_queue_pool[i].node_data == NULL) { continue; } if (KAE_SPIN_TRYLOCK(temp_pool->kae_queue_pool[i].spinlock)) { if (temp_pool->kae_queue_pool[i].node_data == 
NULL) { KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); continue; } else { queue_data_node = temp_pool->kae_queue_pool[i].node_data; temp_pool->kae_queue_pool[i].node_data = (KAE_QUEUE_DATA_NODE_S *)NULL; KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); US_DEBUG("kae queue pool first success. queue_data_node=%p queue_node id =%d", queue_data_node, i); return queue_data_node; } } } /* next pool */ temp_pool = temp_pool->next; } return queue_data_node; } static void kae_free_wd_queue_memory(KAE_QUEUE_DATA_NODE_S *queue_node, release_engine_ctx_cb release_fn) { if (queue_node != NULL) { if (release_fn != NULL && queue_node->engine_ctx != NULL) { release_fn(queue_node->engine_ctx); queue_node->engine_ctx = NULL; } if (queue_node->kae_queue_mem_pool != NULL) { wd_queue_mempool_destroy(queue_node->kae_queue_mem_pool); queue_node->kae_queue_mem_pool = NULL; } if (queue_node->kae_wd_queue != NULL) { wd_free_queue(queue_node->kae_wd_queue); queue_node->kae_wd_queue = NULL; } kae_free(queue_node); queue_node = NULL; } US_DEBUG("free wd queue success"); } static KAE_QUEUE_DATA_NODE_S* kae_new_wd_queue_memory(int algtype) { KAE_QUEUE_DATA_NODE_S *queue_node = NULL; queue_node = (KAE_QUEUE_DATA_NODE_S *)kae_malloc(sizeof(KAE_QUEUE_DATA_NODE_S)); if (queue_node == NULL) { US_ERR("malloc failed"); return NULL; } kae_memset(queue_node, 0, sizeof(KAE_QUEUE_DATA_NODE_S)); queue_node->kae_wd_queue = wd_new_queue(algtype); if (queue_node->kae_wd_queue == NULL) { US_ERR("new wd queue fail"); goto err; } queue_node->kae_queue_mem_pool = create_alg_wd_queue_mempool(algtype, queue_node->kae_wd_queue); if (queue_node->kae_queue_mem_pool == NULL) { US_ERR("request mempool fail!"); goto err; } return queue_node; err: kae_free_wd_queue_memory(queue_node, NULL); return NULL; } KAE_QUEUE_DATA_NODE_S* kae_get_node_from_pool(KAE_QUEUE_POOL_HEAD_S* pool_head) { KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; if (pool_head == NULL) { US_ERR("input params pool_head is null"); return NULL; } queue_data_node = kae_get_queue_data_from_list(pool_head); if (queue_data_node == NULL) { queue_data_node = kae_new_wd_queue_memory(pool_head->algtype); } return queue_data_node; } static void kae_set_pool_use_num(KAE_QUEUE_POOL_HEAD_S *pool, int set_num) { pthread_mutex_lock(&pool->kae_queue_mutex); if (set_num > pool->pool_use_num) { pool->pool_use_num = set_num; } (void)pthread_mutex_unlock(&pool->kae_queue_mutex); } int kae_put_node_to_pool(KAE_QUEUE_POOL_HEAD_S* pool_head, KAE_QUEUE_DATA_NODE_S* node_data) { int i = 0; KAE_QUEUE_POOL_HEAD_S *temp_pool = pool_head; KAE_QUEUE_POOL_HEAD_S *last_pool = NULL; if (node_data == NULL || pool_head == NULL) { return 0; } US_DEBUG("Add nodedata to pool"); while (temp_pool != NULL) { for (i = 0; i < KAE_QUEUE_POOL_MAX_SIZE; i++) { if (temp_pool->kae_queue_pool[i].node_data) { continue; } if (KAE_SPIN_TRYLOCK(temp_pool->kae_queue_pool[i].spinlock)) { if (temp_pool->kae_queue_pool[i].node_data) { KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); continue; } else { temp_pool->kae_queue_pool[i].node_data = node_data; temp_pool->kae_queue_pool[i].add_time = time((time_t *)NULL); KAE_SPIN_UNLOCK(temp_pool->kae_queue_pool[i].spinlock); if (i >= temp_pool->pool_use_num) { kae_set_pool_use_num(temp_pool, i + 1); } US_DEBUG("kae put queue node to pool, queue_node id is %d.", i); return 1; } } } last_pool = temp_pool; temp_pool = temp_pool->next; /* if no empty pool to add,new a pool */ if (temp_pool == NULL) { pthread_mutex_lock(&last_pool->destroy_mutex); if (last_pool->next == NULL) 
{ temp_pool = kae_init_queue_pool(last_pool->algtype); if (temp_pool == NULL) { (void)pthread_mutex_unlock(&last_pool->destroy_mutex); break; } last_pool->next = temp_pool; } (void)pthread_mutex_unlock(&last_pool->destroy_mutex); } } /* if not added,free it */ kae_free_wd_queue_memory(node_data, NULL); return 0; } void kae_queue_pool_reset(KAE_QUEUE_POOL_HEAD_S* pool_head) { (void)pool_head; return; } void kae_queue_pool_destroy(KAE_QUEUE_POOL_HEAD_S* pool_head, release_engine_ctx_cb release_fn) { int error = 0; int i = 0; KAE_QUEUE_DATA_NODE_S *queue_data_node = (KAE_QUEUE_DATA_NODE_S *)NULL; KAE_QUEUE_POOL_HEAD_S *temp_pool = NULL; KAE_QUEUE_POOL_HEAD_S *cur_pool = pool_head; while (cur_pool != NULL) { error = pthread_mutex_lock(&cur_pool->destroy_mutex); if (error != 0) { return; } error = pthread_mutex_lock(&cur_pool->kae_queue_mutex); if (error != 0) { (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); return; } for (i = 0; i < cur_pool->pool_use_num; i++) { queue_data_node = cur_pool->kae_queue_pool[i].node_data; if (queue_data_node != NULL) { kae_free_wd_queue_memory(queue_data_node, release_fn); US_DEBUG("kae queue node destroy success. queue_node id =%d", i); cur_pool->kae_queue_pool[i].node_data = NULL; } } US_DEBUG("pool use num :%d.", cur_pool->pool_use_num); kae_free(cur_pool->kae_queue_pool); (void)pthread_mutex_unlock(&cur_pool->kae_queue_mutex); (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); pthread_mutex_destroy(&cur_pool->kae_queue_mutex); pthread_mutex_destroy(&cur_pool->destroy_mutex); temp_pool = cur_pool; kae_free(cur_pool); cur_pool = temp_pool->next; } US_DEBUG("kae queue pool destory success."); return; } void kae_queue_pool_check_and_release(KAE_QUEUE_POOL_HEAD_S* pool_head, release_engine_ctx_cb release_fn) { int i = 0; int error; time_t current_time; KAE_QUEUE_DATA_NODE_S *queue_data_node = NULL; KAE_QUEUE_POOL_HEAD_S *cur_pool = pool_head; current_time = time((time_t *)NULL); while (cur_pool != NULL) { error = pthread_mutex_lock(&cur_pool->destroy_mutex); if (error != 0) { cur_pool = cur_pool->next; continue; } if (cur_pool->kae_queue_pool == NULL) { (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); cur_pool = cur_pool->next; continue; } for (i = cur_pool->pool_use_num - 1; i >= 0; i--) { if (cur_pool->kae_queue_pool[i].node_data == NULL) { continue; } if (difftime(current_time, cur_pool->kae_queue_pool[i].add_time) < CHECK_QUEUE_TIME_SECONDS) { continue; } if (KAE_SPIN_TRYLOCK(cur_pool->kae_queue_pool[i].spinlock)) { if ((cur_pool->kae_queue_pool[i].node_data == NULL) || (difftime(current_time, cur_pool->kae_queue_pool[i].add_time) < CHECK_QUEUE_TIME_SECONDS)) { KAE_SPIN_UNLOCK(cur_pool->kae_queue_pool[i].spinlock); continue; } else { queue_data_node = cur_pool->kae_queue_pool[i].node_data; cur_pool->kae_queue_pool[i].node_data = (KAE_QUEUE_DATA_NODE_S *)NULL; KAE_SPIN_UNLOCK(cur_pool->kae_queue_pool[i].spinlock); kae_free_wd_queue_memory(queue_data_node, release_fn); US_DEBUG("hpre queue list release success. queue node id =%d", i); } } } (void)pthread_mutex_unlock(&cur_pool->destroy_mutex); cur_pool = cur_pool->next; } return; } KAE/wdmngr/wd_alg_queue.h0000644060212406010010000000161313616500010012474 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for wd_alg_queue.c * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __WD_ALG_QUEUE_H #define __WD_ALG_QUEUE_H #include "wd.h" #include "engine_utils.h" struct wd_queue* wd_new_queue(int algtype); void wd_free_queue(struct wd_queue* queue); #endif KAE/Makefile0000644060212406010010000000572113616500010010030 0ustar # # Author: wudinggui # Date: 2019/7/4 # Description: # compile for accelerator # # Usage: # $ make compile and link the program. # $ make rebuild rebuild the program. The same as make clean && make all. # $ make clean clean the objective, dependent and executable files. # $ make install copy to the system directory. # $ make uninstall clean the executable file from the system directory. #============================================================================== WORK_PATH := . ENGINE_INSTALL_PATH := $(OPENSSL_WORK_PATH)/lib/engines-1.1 CC=gcc LIBNAME := libkae.so VERSION = 1.2.10 TARGET = ${LIBNAME}.${VERSION} SOFTLINK = kae.so ifndef SILENCE SILENCE = @ endif # Src SRCDIRS := ${WORK_PATH}/ SRCDIRS += ${WORK_PATH}/alg/pkey SRCDIRS += ${WORK_PATH}/alg/dh SRCDIRS += ${WORK_PATH}/alg/ciphers SRCDIRS += ${WORK_PATH}/alg/digests SRCDIRS += ${WORK_PATH}/async SRCDIRS += ${WORK_PATH}/wdmngr SRCDIRS += ${WORK_PATH}/utils SRCEXTS := .c # C program # Include INCDIR += -I $(WORK_PATH)/ INCDIR += -I $(WORK_PATH)/alg/pkey INCDIR += -I $(WORK_PATH)/alg/dh INCDIR += -I $(WORK_PATH)/alg/ciphers INCDIR += -I $(WORK_PATH)/alg/digests INCDIR += -I $(WORK_PATH)/async INCDIR += -I $(WORK_PATH)/wdmngr INCDIR += -I $(WORK_PATH)/utils INCDIR += -I $(OPENSSL_WORK_PATH)/include # Include Libs. LIBDIR := -L$(OPENSSL_WORK_PATH)/lib LIBDIR += -L$(WORK_PATH)/../drivers/warpdrive/.libs LIBS := -lcrypto -lwd -pthread LIBS += -lc_nonshared # The flags CFLAGS := -Wall -Werror -fstack-protector-all -fPIC -D_GNU_SOURCE -shared -fgnu89-inline LDFLAGS := $(LIBDIR) LDFLAGS += $(LIBS) LDFLAGS += -Wl,-z,relro,-z,now,-z,noexecstack #safe link option # The command used to delete file. RM = rm -f LN = ln -sf SOURCES = $(foreach d,$(SRCDIRS),$(wildcard $(addprefix $(d)/*,$(SRCEXTS)))) OBJS = $(foreach x,$(SRCEXTS), \ $(patsubst %$(x), %.o, $(filter %$(x),$(SOURCES)))) .PHONY : all objs clean cleanall rebuild all : $(TARGET) # Rules for creating the dependency files (.d). %.d : %.c $(CC) -MM -MD $(CFLAGS) $< # Rules for producing the objects. 
objs : $(OBJS) %.o : %.c @echo compiling $(notdir $<) $(SILENCE) $(CC) -c $(CFLAGS) $(INCDIR) $(LDFLAGS) -o $@ $< $(TARGET): $(OBJS) @echo Linking $@ $(SILENCE) $(CC) $(CFLAGS) $(INCDIR) $(LDFLAGS) -o ./$(TARGET) $(OBJS) -@objcopy --only-keep-debug ./$(TARGET) $(TARGET).symbol -@strip ./$(TARGET) rebuild: clean all clean : @-$(RM) *.d *.a *.so *.symbol $(TARGET) @-$(RM) @find ${WORK_PATH} -name '*.o' -exec $(RM) {} \; @echo all clean install : mkdir -p $(ENGINE_INSTALL_PATH) install -m 755 $(TARGET) $(ENGINE_INSTALL_PATH) $(LN) $(ENGINE_INSTALL_PATH)/$(TARGET) $(ENGINE_INSTALL_PATH)/$(SOFTLINK) $(LN) $(ENGINE_INSTALL_PATH)/$(TARGET) $(ENGINE_INSTALL_PATH)/$(SOFTLINK).0 uninstall : $(RM) $(ENGINE_INSTALL_PATH)/$(SOFTLINK) $(RM) $(ENGINE_INSTALL_PATH)/$(SOFTLINK).0 $(RM) $(ENGINE_INSTALL_PATH)/$(TARGET) $(RM) /var/log/kae.log $(RM) /var/log/kae.log.old KAE/async/0000755060212406010010000000000013616500010007500 5ustar KAE/async/async_event.c0000644060212406010010000001141013616500010012157 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides implementation for async events in KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #ifndef _USE_GNU # define _USE_GNU #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <sys/epoll.h> #include <sys/eventfd.h> #include <unistd.h> #include <openssl/err.h> #include "async_event.h" #include "engine_kae.h" static void async_fd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key, OSSL_ASYNC_FD readfd, void *custom) { (void)ctx; (void)key; (void)custom; if (close(readfd) != 0) { US_WARN("Failed to close fd: %d - error: %d\n", readfd, errno); } } int async_setup_async_event_notification(int jobStatus) { (void)jobStatus; ASYNC_JOB *job; ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; void *custom = NULL; job = ASYNC_get_current_job(); if (job == NULL) { US_ERR("Could not obtain current async job\n"); return 0; } waitctx = ASYNC_get_wait_ctx(job); if (waitctx == NULL) { US_ERR("current job has no waitctx."); return 0; } if (ASYNC_WAIT_CTX_get_fd(waitctx, g_engine_kae_id, &efd, &custom) == 0) { efd = eventfd(0, EFD_NONBLOCK); if (efd == -1) { US_ERR("efd error."); return 0; } if (ASYNC_WAIT_CTX_set_wait_fd(waitctx, g_engine_kae_id, efd, custom, async_fd_cleanup) == 0) { US_ERR("set wait fd error."); async_fd_cleanup(waitctx, g_engine_kae_id, efd, NULL); return 0; } } return 1; } int async_clear_async_event_notification() { ASYNC_JOB *job; ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; size_t num_add_fds = 0; size_t num_del_fds = 0; void *custom = NULL; job = ASYNC_get_current_job(); if (job == NULL) { US_ERR("no async job."); return 0; } waitctx = ASYNC_get_wait_ctx(job); if (waitctx == NULL) { US_ERR("The job has no waitctx"); return 0; } if (ASYNC_WAIT_CTX_get_changed_fds(waitctx, NULL, &num_add_fds, NULL, &num_del_fds) == 0) { US_ERR("no add fds."); return 0; } if (num_add_fds > 0) { if (ASYNC_WAIT_CTX_get_fd(waitctx, 
g_engine_kae_id, &efd, &custom) == 0) { US_ERR("no fd."); return 0; } async_fd_cleanup(waitctx, g_engine_kae_id, efd, NULL); if (ASYNC_WAIT_CTX_clear_fd(waitctx, g_engine_kae_id) == 0) { US_ERR("clear fd error."); return 0; } } return 1; } int async_pause_job(volatile ASYNC_JOB *job, int jobStatus) { (void)jobStatus; ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; void *custom = NULL; uint64_t buf = 0; int ret = 0; waitctx = ASYNC_get_wait_ctx((ASYNC_JOB *)job); if (waitctx == NULL) { US_ERR("error. waitctx is NULL\n"); return ret; } if (ASYNC_pause_job() == 0) { US_ERR("Failed to pause the job\n"); return ret; } ret = ASYNC_WAIT_CTX_get_fd(waitctx, g_engine_kae_id, &efd, &custom); if (ret > 0) { if (read(efd, &buf, sizeof(uint64_t)) == -1) { if (errno != EAGAIN) { US_WARN("Failed to read from fd: %d - error: %d\n", efd, errno); } /* Not resumed by the expected async_wake_job() */ return ASYNC_JOB_RESUMED_UNEXPECTEDLY; } } return ret; } int async_wake_job(volatile ASYNC_JOB *job, int jobStatus) { (void)jobStatus; ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; void *custom = NULL; uint64_t buf = 1; int ret = 0; waitctx = ASYNC_get_wait_ctx((ASYNC_JOB *)job); if (waitctx == NULL) { US_ERR("error. waitctx is NULL\n"); return ret; } ret = ASYNC_WAIT_CTX_get_fd(waitctx, g_engine_kae_id, &efd, &custom); if (ret > 0) { if (write(efd, &buf, sizeof(uint64_t)) == -1) { US_ERR("Failed to write to fd: %d - error: %d\n", efd, errno); } } US_DEBUG("- async wake job success - "); return ret; } /*lint -e(10)*/ KAE/async/async_task_queue.h0000644060212406010010000000415113616500010013215 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides interface for the KAE engine async task queue * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ASYNC_TASK_QUEUE_H #define ASYNC_TASK_QUEUE_H #include <pthread.h> #include "../engine_kae.h" #include "async_callback.h" #include "wd.h" #include <semaphore.h> #define MAX_ALG_SIZE 6 typedef int (*async_recv_t)(void* engine_ctx); enum task_type { ASYNC_TASK_CIPHER, ASYNC_TASK_DIGEST, ASYNC_TASK_RSA, ASYNC_TASK_DH }; struct async_wd_polling_arg { enum task_type type; void *eng_ctx; op_done_t *op_done; }; typedef struct async_wd_polling_arg async_poll_task; typedef struct async_poll_queue_t { async_poll_task* async_poll_task_queue_head; int head_pos; int tail_pos; int cur_task; int left_task; int shutdown; sem_t empty_sem; sem_t full_sem; pthread_mutex_t async_task_mutex; pthread_t thread_id; int init_mark; int exit_mark; } async_poll_queue_t; extern async_poll_queue_t g_async_poll_queue; extern async_recv_t g_async_recv_func[MAX_ALG_SIZE]; int async_register_poll_fn(int type, async_recv_t async_recv); int async_poll_task_init(); async_poll_task* async_get_queue_task(); int async_add_poll_task(void *ctx, op_done_t *op_done, enum task_type type); void async_poll_task_free(); #endif KAE/async/async_event.h0000644060212406010010000000246713616500010012200 0ustar /* * Copyright (C) 2019. 
Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides interface for async events in KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __ASYNC_EVENTS_H__ #define __ASYNC_EVENTS_H__ #include <unistd.h> #include <openssl/async.h> #define ASYNC_JOB_RESUMED_UNEXPECTEDLY (-1) #define ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(x) \ ((x) == ASYNC_JOB_RESUMED_UNEXPECTEDLY) #define ASYNC_STATUS_UNSUPPORTED 0 #define ASYNC_STATUS_ERR 1 #define ASYNC_STATUS_OK 2 #define ASYNC_STATUS_EAGAIN 3 int async_setup_async_event_notification(int jobStatus); int async_clear_async_event_notification(); int async_pause_job(volatile ASYNC_JOB *job, int jobStatus); int async_wake_job(volatile ASYNC_JOB *job, int jobStatus); #endif KAE/async/async_task_queue.c0000644060212406010010000001275713616500010013223 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for the KAE engine async task queue * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #ifndef __USE_GNU # define __USE_GNU #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <signal.h> #include <errno.h> #include <openssl/err.h> #include "async_task_queue.h" #include "engine_kae.h" #include "async_event.h" #include "engine_utils.h" #define ASYNC_POLL_TASK_NUM 4096 async_poll_queue_t g_async_poll_queue = { .init_mark = 0, }; async_recv_t g_async_recv_func[MAX_ALG_SIZE]; int async_register_poll_fn(int type, async_recv_t func) { if (type < 0 || type >= MAX_ALG_SIZE) { return -1; } g_async_recv_func[type] = func; return 0; } int async_poll_task_init() { kae_memset(&g_async_poll_queue, 0, sizeof(g_async_poll_queue)); g_async_poll_queue.async_poll_task_queue_head = (async_poll_task*)malloc(sizeof(async_poll_task) * ASYNC_POLL_TASK_NUM); if (g_async_poll_queue.async_poll_task_queue_head == NULL) { US_ERR("no enough memory for task queue, errno=%d", errno); //lint !e666 return 0; } kae_memset(g_async_poll_queue.async_poll_task_queue_head, 0, sizeof(async_poll_task) * ASYNC_POLL_TASK_NUM); g_async_poll_queue.left_task = ASYNC_POLL_TASK_NUM; int ret = sem_init(&g_async_poll_queue.empty_sem, 0, (unsigned int)g_async_poll_queue.left_task); if (ret != 0) { US_ERR("fail to init empty semaphore, errno=%d", errno); //lint !e666 goto _err; } if (sem_init(&g_async_poll_queue.full_sem, 0, 0) != 0) { US_ERR("fail to init full semaphore, errno=%d", errno); //lint !e666 goto _err; } US_DEBUG("async poll task init done."); return 1; _err: async_poll_task_free(); return 0; } async_poll_task* async_get_queue_task() { async_poll_task *task_queue; async_poll_task *cur_task; int tail_pos; if (pthread_mutex_lock(&g_async_poll_queue.async_task_mutex) != 0) { US_ERR("lock queue mutex failed, errno:%d", errno); //lint !e666 return NULL; } tail_pos = g_async_poll_queue.tail_pos; task_queue = g_async_poll_queue.async_poll_task_queue_head; cur_task = &task_queue[tail_pos]; g_async_poll_queue.tail_pos = (tail_pos + 1) % ASYNC_POLL_TASK_NUM; g_async_poll_queue.cur_task--; g_async_poll_queue.left_task++; if (pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex) != 0) { US_ERR("unlock queue mutex failed, errno:%d", errno); //lint !e666 } if (sem_post(&g_async_poll_queue.empty_sem) != 0) { US_ERR("post empty sem failed, errno:%d", errno); //lint !e666 } US_DEBUG("get task end"); return cur_task; } static int async_add_queue_task(void *eng_ctx, op_done_t *op_done, enum task_type type) { async_poll_task *task_queue; async_poll_task *task; int head_pos; if (sem_wait(&g_async_poll_queue.empty_sem) != 0) { US_ERR("wait empty sem failed, errno:%d", errno); //lint !e666 return 0; } if (pthread_mutex_lock(&g_async_poll_queue.async_task_mutex) != 0) { US_ERR("lock queue mutex failed, errno:%d", errno); //lint !e666 } head_pos = g_async_poll_queue.head_pos; task_queue = g_async_poll_queue.async_poll_task_queue_head; task = &task_queue[head_pos]; task->eng_ctx = eng_ctx; task->op_done = op_done; task->type = type; head_pos = (head_pos + 1) % ASYNC_POLL_TASK_NUM; g_async_poll_queue.head_pos = head_pos; g_async_poll_queue.cur_task++; g_async_poll_queue.left_task--; if (pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex) != 0) { US_ERR("unlock queue mutex failed, errno:%d", errno); //lint !e666 } if (sem_post(&g_async_poll_queue.full_sem) != 0) { US_ERR("post full sem failed, errno:%d", errno); //lint !e666 } US_DEBUG("add task success"); return 1; } static void async_poll_queue_free() { async_poll_task *task = 
g_async_poll_queue.async_poll_task_queue_head; if (task != NULL) { OPENSSL_free(task); } g_async_poll_queue.async_poll_task_queue_head = NULL; } int async_add_poll_task(void *eng_ctx, op_done_t *op_done, enum task_type type) { US_DEBUG("start to add task to poll queue"); return async_add_queue_task(eng_ctx, op_done, type); } void async_poll_task_free() { int error; error = pthread_mutex_lock(&g_async_poll_queue.async_task_mutex); if (error != 0) { US_ERR("lock mutex failed, errno=%d", errno); //lint !e666 return ; } async_poll_queue_free(); pthread_mutex_unlock(&g_async_poll_queue.async_task_mutex); sem_destroy(&g_async_poll_queue.empty_sem); sem_destroy(&g_async_poll_queue.full_sem); pthread_mutex_destroy(&g_async_poll_queue.async_task_mutex); US_DEBUG("async task free succ"); return; } /*lint -e(10)*/ KAE/async/async_poll.h0000644060212406010010000000162213616500010012015 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides interface for the KAE engine thread polling * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ASYNC_POLLING_H #define ASYNC_POLLING_H #include <pthread.h> #include "../engine_kae.h" #include "async_callback.h" #include "async_task_queue.h" void async_module_init(); #endif KAE/async/async_callback.h0000644060212406010010000000201313616500010012576 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides interface for callback in KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ASYNC_CALLBACK_H #define ASYNC_CALLBACK_H #include <sys/types.h> #include <openssl/async.h> typedef struct { volatile int flag; volatile int verifyRst; volatile ASYNC_JOB *job; } op_done_t; void async_init_op_done(op_done_t *op_done); void async_cleanup_op_done(op_done_t *op_done); #endif KAE/async/async_callback.c0000644060212406010010000000274613616500010012606 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides implementation for callback in KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #ifndef __USE_GNU # define __USE_GNU #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include "async_callback.h" #include "engine_log.h" #include "engine_utils.h" #include <openssl/err.h> void async_init_op_done(op_done_t *op_done) { if ((op_done == NULL)) { US_ERR("error! paramater is NULL."); return ; } op_done->flag = 0; op_done->verifyRst = 0; op_done->job = ASYNC_get_current_job(); } void async_cleanup_op_done(op_done_t *op_done) { if ((op_done == NULL)) { US_ERR("error! paramater is NULL."); return; } op_done->verifyRst = 0; if (op_done->job) { op_done->job = NULL; } return; } /*lint -e(10)*/ KAE/async/async_poll.c0000644060212406010010000000715613616500010012020 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for the KAE engine thread polling * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #ifndef __USE_GNU # define __USE_GNU #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <signal.h> #include <errno.h> #include "engine_check.h" #include "async_poll.h" #include "async_event.h" #include "async_task_queue.h" #include "engine_utils.h" #include <openssl/err.h> #define ASYNC_POLL_TASK_NUM 1024 static void async_polling_thread_destroy(); static void *async_poll_process_func(void *args) { (void)args; int ret; async_poll_task *task; void *eng_ctx; int type; op_done_t *op_done; while (1) { if (sem_wait(&g_async_poll_queue.full_sem) != 0) { if (errno == EINTR) { /* sem_wait is interrupted by interrupt, continue */ continue; } US_ERR("wait async full_sem failed, errno:%d", errno); //lint !e666 } task = async_get_queue_task(); if (task == NULL) { usleep(1); continue; } eng_ctx = task->eng_ctx; op_done = task->op_done; type = task->type; US_DEBUG("async poll thread start to recv result."); ret = g_async_recv_func[type](eng_ctx); op_done->verifyRst = ret; op_done->flag = 1; if (op_done->job) { async_wake_job(op_done->job, ASYNC_STATUS_OK); } US_DEBUG("process task done."); } US_DEBUG("polling thread exit."); return NULL; } void async_polling_thread_reset() { g_async_poll_queue.init_mark = 0; kae_memset(&g_async_poll_queue, 0, sizeof(g_async_poll_queue)); } int async_polling_thread_init() { US_DEBUG("init polling thread."); if (g_async_poll_queue.init_mark == INITED) return 1; kae_memset(&g_async_poll_queue, 0, sizeof(async_poll_queue_t)); if (pthread_mutex_init(&(g_async_poll_queue.async_task_mutex), NULL) < 0) { US_ERR("init queue mutex failed, errno:%d", errno); //lint !e666 } if (!async_poll_task_init()) { US_ERR("init poll task queue failed."); return 0; } pthread_t thread_id; if (kae_create_thread(&thread_id, NULL, async_poll_process_func, NULL) == 0) { US_DEBUG("fail to create polling thread"); goto _err; } g_async_poll_queue.thread_id 
= thread_id; g_async_poll_queue.init_mark = INITED; (void)OPENSSL_atexit(async_polling_thread_destroy); return 1; _err: async_poll_task_free(); return 0; } static void async_polling_thread_destroy() { if (g_async_poll_queue.exit_mark == 1) return; async_poll_task_free(); g_async_poll_queue.exit_mark = 1; return; } void async_module_init() { if (kae_is_async_enabled()) { async_poll_task_free(); async_polling_thread_reset(); if (!async_polling_thread_init()) { kae_disable_async(); } } } /*lint -e(10)*/ KAE/.travis.yml0000644060212406010010000000127613616500010010502 0ustar arch: - arm64 addons: hosts: - node1 scripts: - cd ../ - wget https://www.openssl.org/source/old/1.1.1/openssl-1.1.1a.tar.gz - tar -zxf openssl-1.1.1a.tar.gz - cd openssl-1.1.1a - sudo ./config -d -Wl,-rpath,/usr/local/lib - sudo make -j20 - sudo make install -j20 - cd ../ - wget https://github.com/kunpengcompute/KAEdriver/releases/download/v1.2.8-beta.1/Kunpeng_KAE_driver-1.2.8-beta.1.tar.gz - tar -zxf Kunpeng_KAE_driver-1.2.8-beta.1.tar.gz - cd Kunpeng_KAE_driver/warpdrive - sudo sh autogen.sh - sudo ./configure - sudo make - sudo make install - cd ../../KAE - sudo chmod +x configure - sudo ./configure - sudo make clean - sudo make KAE/engine_kae.h0000644060212406010010000000177413616500010010632 0ustar /* * Copyright (c) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for an OpenSSL KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ENGINE_KAE_H #define ENGINE_KAE_H #include <openssl/engine.h> #include <openssl/evp.h> #include <openssl/crypto.h> #include "engine_opensslerr.h" #include "engine_log.h" /* Engine id */ extern const char *g_engine_kae_id ; int kae_get_device(const char *dev); #endif // !ENGINE_KAE_H KAE/utils/0000755060212406010010000000000013616500010007523 5ustar KAE/utils/engine_utils.h0000644060212406010010000000644413616500010012371 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for utils module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef KAE_ACC_ENGINE_UTILS_H #define KAE_ACC_ENGINE_UTILS_H #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <sys/file.h> #include <sys/syscall.h> #include <sys/types.h> #include <sys/syscall.h> #define gettid() syscall(SYS_gettid) #define PRINTPID \ US_DEBUG("pid=%d, ptid=%lu, tid=%d", getpid(), pthread_self(), gettid()) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #ifndef true #define true (0 == 0) #endif #ifndef false #define false (0 == 1) #endif enum KAE_Q_INIT_FLAG { NOT_INIT = 0, INITED, }; #define UNUSED(x) (void)(x) #define BLOCKSIZES_OF(data) (sizeof((data)) / sizeof(((data)[0]))) #define KAE_SPIN_INIT(q) kae_spinlock_init(&(q)) #define KAE_SPIN_LOCK(q) kae_spinlock_lock(&(q)) #define KAE_SPIN_TRYLOCK(q) kae_spinlock_trylock(&(q)) #define KAE_SPIN_UNLOCK(q) kae_spinlock_unlock(&(q)) #define kae_free(addr) \ do { \ if (addr != NULL) { \ free(addr); \ addr = NULL; \ } \ } while (0) struct kae_spinlock { int lock; }; static inline void kae_spinlock_init(struct kae_spinlock *lock) { lock->lock = 0; } static inline void kae_spinlock_lock(struct kae_spinlock *lock) { while (__sync_lock_test_and_set(&lock->lock, 1)) {} } static inline int kae_spinlock_trylock(struct kae_spinlock *lock) { return __sync_lock_test_and_set(&lock->lock, 1) == 0; } static inline void kae_spinlock_unlock(struct kae_spinlock *lock) { __sync_lock_release(&lock->lock); } static inline void *kae_malloc(unsigned int size) { return malloc(size); } static inline void *kae_realloc(void *mem_address, unsigned int newsize) { return realloc(mem_address, newsize); } static inline void *kae_calloc(unsigned int num, unsigned int size) { return calloc(num, size); } static inline int kae_strcmp(const char *src, const char *dst) { return strcmp(src, dst); } static inline void kae_memset(void *ptr, int value, int len) { (void)memset(ptr, value, len); } static inline void kae_memcpy(void *src, const void *dst, int len) { (void)memcpy(src, dst, len); } static inline void kae_pthread_yield() { (void)pthread_yield(); //lint !e1055 } int kae_create_thread(pthread_t *thread_id, const pthread_attr_t *attr, void *(*start_func)(void *), void *p_arg); int kae_create_thread_joinable(pthread_t *thread_id, const pthread_attr_t *attr, void *(*start_func)(void *), void *p_arg); inline int kae_join_thread(pthread_t thread_id, void **retval); #endif KAE/utils/engine_fork.h0000644060212406010010000000161013616500010012160 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for a KAE engine fork * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HPRE_FORK_H #define __HPRE_FORK_H void engine_init_child_at_fork_handler(void); void engine_do_before_fork_handler(void); void engine_init_parent_at_fork_handler(void); #endif KAE/utils/engine_opensslerr.h0000644060212406010010000000630213616500010013416 0ustar /* * Copyright (C) 2019. 
Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for error module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HISI_ACC_ENGINE_OPENSSLERR_H #define HISI_ACC_ENGINE_OPENSSLERR_H int err_load_kae_strings(void); void err_unload_kae_strings(void); void err_kae_error(int function, int reason, char *engine_file, int line); #define KAEerr(f, r) err_kae_error((f), (r), OPENSSL_FILE, OPENSSL_LINE) /* Function codes. */ enum HISI_FUNC_CODE { KAE_F_HPRE_GET_RSA_METHODS = 100, KAE_F_CHANGRSAMETHOD, KAE_F_HPRE_PKEY_METHS, KAE_F_BIND_HELPER, KAE_F_RSA_FILL_KENGEN_PARAM, KAE_F_HPRE_RSA_PUBENC, KAE_F_HPRE_RSA_PRIENC, KAE_F_HPRE_RSA_PUBDEC, KAE_F_HPRE_RSA_PRIDEC, KAE_F_HPRE_RSA_PRIMEGEN, KAE_F_HPRE_RSA_KEYGEN, KAE_F_CHECK_PUBKEY_PARAM, KAE_F_HPRE_PUBENC_PADDING, KAE_F_HPRE_PRIENC_PADDING, KAE_F_CHECK_HPRE_PUBDEC_PADDING, KAE_F_CHECK_HPRE_PRIDEC_PADDING, KAE_F_SEC_SM3_INIT, KAE_F_SEC_SM3_FINAL, KAE_F_DIGEST_SOFT_INIT, KAE_F_ENGINE_WD, KAE_F_BIND_FN, KAE_F_CHECK_DATA_VALID, KAE_F_CHECK_MALLOC_SUCC, KAE_F_HPRE_GET_DH_METHODS, KAE_F_HPRE_DH_KEYGEN, KAE_F_HPRE_DH_KEYCOMP, KAE_F_CHANGDHMETHOD }; enum HISI_RESON_CODE { KAE_R_NO_MATCH_DEVICE = 100, KAE_R_MALLOC_FAILURE, KAE_R_HWMEM_MALLOC_FAILURE, KAE_R_INPUT_PARAM_ERR, KAE_R_SET_ID_FAILURE, KAE_R_SET_NAME_FAILURE, KAE_R_SET_PKEYMETH_FAILURE, KAE_R_SET_RSA_FAILURE, KAE_R_SET_DESTORY_FAILURE, KAE_R_SET_INIT_FAILURE, KAE_R_SET_CTRL_FAILURE, KAE_R_SET_CMDDEF_FAILURE, KAE_R_SET_FINISH_FAILURE, KAE_R_UNSUPPORT_HARDWARE_TYPE, KAE_R_TIMEOUT, KAE_R_RSARECV_FAILURE, KAE_R_RSARECV_STATE_FAILURE, KAE_R_RSASEND_FAILURE, KAE_R_GET_ALLOCED_HWMEM_FAILURE, KAE_R_FREE_ALLOCED_HWMEM_FAILURE, KAE_R_RSA_KEY_NOT_COMPELET, KAE_R_RSA_PADDING_FAILURE, KAE_R_DATA_TOO_LARGE_FOR_MODULUS, KAE_R_DATA_GREATER_THEN_MOD_LEN, KAE_R_CHECKPADDING_FAILURE, KAE_R_ERR_LIB_BN, KAE_R_RSA_KEY_SIZE_TOO_SMALL, KAE_R_MODULE_TOO_LARGE, KAE_R_INVAILED_E_VALUE, KAE_R_UNKNOW_PADDING_TYPE, KAE_R_INPUT_FIKE_LENGTH_ZERO, KAE_R_SET_CIPHERS_FAILURE, KAE_R_SET_DIGESTS_FAILURE, KAE_R_NEW_ENGINE_FAILURE, KAE_R_BIND_ENGINE_FAILURE, KAE_R_RSA_SET_METHODS_FAILURE, KAE_R_PUBLIC_KEY_INVALID, KAE_R_PUBLIC_ENCRYPTO_FAILURE, KAE_R_PUBLIC_DECRYPTO_FAILURE, KAE_R_GET_PRIMEKEY_FAILURE, KAE_R_DH_SET_METHODS_FAILURE, KAE_R_SET_DH_FAILURE, KAE_R_DH_KEY_SIZE_TOO_LARGE, KAE_R_DH_INVALID_PARAMETER, KAE_R_ENGINE_ALREADY_DEFINED, }; #endif // HISI_ACC_ENGINE_OPENSSLERR_H KAE/utils/engine_check.c0000644060212406010010000000752713616500010012304 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for an engine check thread * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <pthread.h> #include "sec_ciphers_wd.h" #include "sec_digests_wd.h" #include "hpre_wd.h" #include "hpre_dh_wd.h" #include "engine_check.h" #include "engine_utils.h" #include "engine_log.h" KAE_CHECK_Q_TASK g_kae_check_q_task = { .init_flag = NOT_INIT, }; static pthread_once_t g_check_thread_is_initialized = PTHREAD_ONCE_INIT; static struct kae_spinlock g_kae_async_spinmtx = { .lock = 0, }; static unsigned int g_kae_async_enabled = 1; void kae_enable_async(void) { KAE_SPIN_LOCK(g_kae_async_spinmtx); g_kae_async_enabled = 1; KAE_SPIN_UNLOCK(g_kae_async_spinmtx); } void kae_disable_async(void) { KAE_SPIN_LOCK(g_kae_async_spinmtx); g_kae_async_enabled = 0; KAE_SPIN_UNLOCK(g_kae_async_spinmtx); } int kae_is_async_enabled(void) { return g_kae_async_enabled; } static void kae_set_exit_flag() { g_kae_check_q_task.exit_flag = 1; } static void *kae_checking_q_loop_fn(void *args) { (void)args; while (1) { if (g_kae_check_q_task.exit_flag) { break; } usleep(KAE_QUEUE_CHECKING_INTERVAL); if (g_kae_check_q_task.exit_flag) { break; // double check } kae_queue_pool_check_and_release(wd_ciphers_get_qnode_pool(), wd_ciphers_free_engine_ctx); kae_queue_pool_check_and_release(wd_digests_get_qnode_pool(), wd_digests_free_engine_ctx); kae_queue_pool_check_and_release(wd_hpre_get_qnode_pool(), NULL); kae_queue_pool_check_and_release(wd_hpre_dh_get_qnode_pool(), NULL); } US_INFO("check thread exit normally."); return NULL; // lint !e527 } static void kae_checking_q_thread_destroy(void) { kae_set_exit_flag(); (void)kae_join_thread(g_kae_check_q_task.thread_id, NULL); kae_queue_pool_destroy(wd_ciphers_get_qnode_pool(), wd_ciphers_free_engine_ctx); kae_queue_pool_destroy(wd_digests_get_qnode_pool(), wd_digests_free_engine_ctx); kae_queue_pool_destroy(wd_hpre_get_qnode_pool(), NULL); kae_queue_pool_destroy(wd_hpre_dh_get_qnode_pool(), NULL); return; } static void kae_check_thread_init() { if (g_kae_check_q_task.init_flag == INITED) return; pthread_t thread_id; if (!kae_create_thread_joinable(&thread_id, NULL, kae_checking_q_loop_fn, NULL)) { US_ERR("fail to create check thread"); return; } g_kae_check_q_task.thread_id = thread_id; g_kae_check_q_task.init_flag = INITED; (void)OPENSSL_atexit(kae_checking_q_thread_destroy); return; } int kae_checking_q_thread_init(void) { US_DEBUG("check queue thread init begin"); if (g_kae_check_q_task.init_flag == INITED) return 1; pthread_once(&g_check_thread_is_initialized, kae_check_thread_init); if (g_kae_check_q_task.init_flag != INITED) { US_ERR("check thread init failed"); g_check_thread_is_initialized = PTHREAD_ONCE_INIT; return 0; } return 1; } void kae_check_thread_reset() { kae_memset(&g_kae_check_q_task, 0, sizeof(KAE_CHECK_Q_TASK)); g_check_thread_is_initialized = PTHREAD_ONCE_INIT; } KAE/utils/engine_config.c0000644060212406010010000000516713616500010012472 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
* * Description: This file provides implemenation of configuration file reading for the KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "engine_config.h" int kae_drv_findsection(FILE *stream, const char *v_pszSection) { char line[256]; // array length:256 char *pos = NULL; size_t section_len = strlen(v_pszSection); while (!feof(stream)) { if (fgets(line, sizeof(line), stream) == NULL) { return -1; } pos = line; if (*(pos++) != '[') { continue; } if (memcmp(pos, v_pszSection, section_len) == 0) { pos += section_len; if (*pos == ']') { return 0; } } } return -1; } void kae_drv_get_value(char *pos, char *v_pszValue) { while (*pos != '\0') { if (*pos == ' ') { pos++; continue; } if (*pos == ';') { *(v_pszValue++) = '\0'; return; } *(v_pszValue++) = *(pos++); } } int kae_drv_find_item(FILE *stream, const char *v_pszItem, char *v_pszValue) { char line[256]; // array length:256 char *pos = NULL; while (!feof(stream)) { if (fgets(line, sizeof(line), stream) == NULL) { return -1; } if (strstr(line, v_pszItem) != NULL) { pos = strstr(line, "="); if (pos != NULL) { pos++; kae_drv_get_value(pos, v_pszValue); return 0; } } if ('[' == line[0]) { break; } } return -1; } int kae_drv_get_item(const char *config_file, const char *v_pszSection, const char *v_pszItem, char *v_pszValue) { FILE *stream; int retvalue = -1; stream = fopen(config_file, "r"); if (stream == NULL) { return -1; } if (kae_drv_findsection(stream, v_pszSection) == 0) { retvalue = kae_drv_find_item(stream, v_pszItem, v_pszValue); } fclose(stream); return retvalue; } KAE/utils/engine_fork.c0000644060212406010010000000272613616500010012164 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the inplemenation for a KAE engine fork * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include <openssl/rsa.h> #include <openssl/err.h> #include <openssl/evp.h> #include "engine_fork.h" #include "engine_check.h" #include "async_poll.h" #include "hpre_rsa.h" #include "hpre_dh.h" #include "sec_ciphers.h" #include "sec_digests.h" void engine_init_child_at_fork_handler(void) { US_DEBUG("call engine_init_child_at_fork_handler"); kae_check_thread_reset(); if (!kae_checking_q_thread_init()) { US_WARN("kae queue check thread init failed"); } (void)hpre_module_init(); (void)hpre_module_dh_init(); (void)cipher_module_init(); (void)digest_module_init(); async_module_init(); return; } void engine_do_before_fork_handler(void) { return; } void engine_init_parent_at_fork_handler(void) { return; } KAE/utils/engine_check.h0000644060212406010010000000227013616500010012277 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for an engine check thread * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ENGINE_CHECK_H #define ENGINE_CHECK_H #include <pthread.h> #define KAE_QUEUE_CHECKING_INTERVAL 15000 struct kae_check_q_task_s { int init_flag; int exit_flag; pthread_t thread_id; }; typedef struct kae_check_q_task_s KAE_CHECK_Q_TASK; void kae_enable_async(void); void kae_disable_async(void); int kae_is_async_enabled(void); int kae_checking_q_thread_init(void); void kae_check_thread_reset(); #endif // end of ENGINE_CHECK_H KAE/utils/engine_log.h0000644060212406010010000001227413616500010012010 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the interface for log module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef KAE_ACC_ENGINE_LOG_H #define KAE_ACC_ENGINE_LOG_H #include <sys/file.h> #include <unistd.h> #include <stdio.h> #include <time.h> #include <pthread.h> #define LOG_LEVEL_CONFIG KAE_NONE #define KAE_DEBUG_FILE_PATH "/var/log/kae.log" #define KAE_DEBUG_FILE_PATH_OLD "/var/log/kae.log.old" #define KAE_LOG_MAX_SIZE 209715200 extern FILE *g_kae_debug_log_file; extern pthread_mutex_t g_debug_file_mutex; extern const char *g_log_level[]; extern int g_kae_log_level; enum KAE_LOG_LEVEL { KAE_NONE = 0, KAE_ERROR, KAE_WARNING, KAE_INFO, KAE_DEBUG, }; void ENGINE_LOG_LIMIT(int level, int times, int limit, const char *fmt, ...); #define CRYPTO(LEVEL, fmt, args...) 
\ do { \ if (LEVEL > g_kae_log_level) { \ break; \ } \ struct tm *log_tm_p = NULL; \ time_t timep = time((time_t *)NULL); \ log_tm_p = localtime(&timep); \ flock(g_kae_debug_log_file->_fileno, LOCK_EX); \ pthread_mutex_lock(&g_debug_file_mutex); \ fseek(g_kae_debug_log_file, 0, SEEK_END); \ if (log_tm_p != NULL) { \ fprintf(g_kae_debug_log_file, "[%4d-%02d-%02d %02d:%02d:%02d][%s][%s:%d:%s()] " fmt "\n", \ (1900 + log_tm_p->tm_year), (1 + log_tm_p->tm_mon), log_tm_p->tm_mday, \ log_tm_p->tm_hour, log_tm_p->tm_min, log_tm_p->tm_sec, \ g_log_level[LEVEL], __FILE__, __LINE__, __func__, ##args); \ } else { \ fprintf(g_kae_debug_log_file, "[%s][%s:%d:%s()] " fmt "\n", \ g_log_level[LEVEL], __FILE__, __LINE__, __func__, ##args); \ } \ if (ftell(g_kae_debug_log_file) > KAE_LOG_MAX_SIZE) { \ kae_save_log(g_kae_debug_log_file); \ ftruncate(g_kae_debug_log_file->_fileno, 0); \ fseek(g_kae_debug_log_file, 0, SEEK_SET); \ } \ pthread_mutex_unlock(&g_debug_file_mutex); \ flock(g_kae_debug_log_file->_fileno, LOCK_UN); \ } while (0) #define US_ERR(fmt, args...) CRYPTO(KAE_ERROR, fmt, ##args) #define US_WARN(fmt, args...) CRYPTO(KAE_WARNING, fmt, ##args) #define US_INFO(fmt, args...) CRYPTO(KAE_INFO, fmt, ##args) #define US_DEBUG(fmt, args...) CRYPTO(KAE_DEBUG, fmt, ##args) #define US_WARN_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_WARNING, 3, 30, fmt, ##args) #define US_ERR_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_ERROR, 3, 30, fmt, ##args) #define US_INFO_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_INFO, 3, 30, fmt, ##args) #define US_DEBUG_LIMIT(fmt, args...) ENGINE_LOG_LIMIT(KAE_DEBUG, 3, 30, fmt, ##args) void kae_debug_init_log(); void kae_debug_close_log(); void kae_save_log(FILE *src); /* * desc: print data for debug * @param name the name of buf * @param buf the buf msg when input * @param len bd len */ void dump_data(const char *name, unsigned char *buf, unsigned int len); /* * desc: print bd for debug * @param bd the buf msg when input * @param len bd len */ void dump_bd(unsigned int *bd, unsigned int len); #endif KAE/utils/engine_types.h0000644060212406010010000000173713616500010012375 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for some base type or define for KAE * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef KAE_ENGINE_TYPES_H #define KAE_ENGINE_TYPES_H #define OPENSSL_SUCCESS (1) #define OPENSSL_FAIL (0) #define KAE_SUCCESS (0) #define KAE_FAIL (-1) #define NO_C_MODE (UINT_MAX) #define NO_C_ALG (UINT_MAX) #endif KAE/utils/engine_utils.c0000644060212406010010000000350213616500010012354 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for utis module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <pthread.h> #include "engine_utils.h" #include "engine_log.h" int kae_create_thread(pthread_t *thread_id, const pthread_attr_t *attr, void *(*start_func)(void *), void *p_arg) { (void)attr; pthread_attr_t thread_attr; pthread_attr_init(&thread_attr); pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED); if (pthread_create(thread_id, &thread_attr, start_func, p_arg) != 0) { US_ERR("fail to create thread, reason:%s", strerror(errno)); //lint !e666 return 0; } return 1; } int kae_create_thread_joinable(pthread_t *thread_id, const pthread_attr_t *attr, void *(*start_func)(void *), void *p_arg) { (void)attr; pthread_attr_t thread_attr; pthread_attr_init(&thread_attr); pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE); if (pthread_create(thread_id, &thread_attr, start_func, p_arg) != 0) { US_ERR("fail to create thread, reason:%s", strerror(errno)); //lint !e666 return 0; } return 1; } inline int kae_join_thread(pthread_t threadId, void **retval) { return pthread_join(threadId, retval); } KAE/utils/engine_log.c0000644060212406010010000001617313616500010012005 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for log module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include "engine_log.h" #include "engine_config.h" #include "engine_utils.h" #define KAE_CONFIG_FILE_NAME "/kae.cnf" #define MAX_LEVEL_LEN 10 #define MAX_CONFIG_LEN 512 static const char *g_kae_conf_env = "KAE_CONF_ENV"; FILE *g_kae_debug_log_file = (FILE *)NULL; pthread_mutex_t g_debug_file_mutex = PTHREAD_MUTEX_INITIALIZER; int g_debug_file_ref_count = 0; int g_log_init_times = 0; int g_kae_log_level = 0; const char *g_log_level[] = { "none", "error", "warning", "info", "debug", }; static char *kae_getenv(const char *name) { return getenv(name); } static void kae_set_conf_debuglevel() { char *conf_path = kae_getenv(g_kae_conf_env); unsigned int i = 0; const char *filename = KAE_CONFIG_FILE_NAME; char *file_path = (char *)NULL; char *debuglev = (char *)NULL; if (conf_path == NULL || strlen(conf_path) > MAX_CONFIG_LEN) { goto err; } file_path = (char *)kae_malloc(strlen(conf_path) + strlen(filename) + 1); debuglev = (char *)kae_malloc(MAX_LEVEL_LEN); if (!file_path || !debuglev) { goto err; } memset(debuglev, 0, MAX_LEVEL_LEN); memset(file_path, 0, sizeof(conf_path) + sizeof(filename) + 1); strcat(file_path, conf_path); strcat(file_path, filename); int ret = kae_drv_get_item(file_path, "LogSection", "debug_level", debuglev); if (ret != 0) { goto err; } for (i = 0; i < sizeof(g_log_level) / sizeof(g_log_level[0]); i++) { if (strncmp(g_log_level[i], debuglev, strlen(debuglev) - 1) == 0) { g_kae_log_level = i; kae_free(file_path); kae_free(debuglev); return; } } err: g_kae_log_level = KAE_ERROR; if (debuglev != NULL) { kae_free(debuglev); debuglev = (char *)NULL; } if (file_path != NULL) { kae_free(file_path); file_path = (char *)NULL; } return; } void kae_debug_init_log() { pthread_mutex_lock(&g_debug_file_mutex); kae_set_conf_debuglevel(); if (!g_debug_file_ref_count && g_kae_log_level != KAE_NONE) { g_kae_debug_log_file = fopen(KAE_DEBUG_FILE_PATH, "a+"); if (g_kae_debug_log_file == NULL) { g_kae_debug_log_file = stderr; US_WARN("unable to open %s", KAE_DEBUG_FILE_PATH); } else { g_debug_file_ref_count++; } } g_log_init_times++; pthread_mutex_unlock(&g_debug_file_mutex); } void kae_debug_close_log() { pthread_mutex_lock(&g_debug_file_mutex); g_log_init_times--; if (g_debug_file_ref_count && (g_log_init_times == 0)) { if (g_kae_debug_log_file != NULL) { fclose(g_kae_debug_log_file); g_debug_file_ref_count--; g_kae_debug_log_file = stderr; } } pthread_mutex_unlock(&g_debug_file_mutex); } void ENGINE_LOG_LIMIT(int level, int times, int limit, const char *fmt, ...) 
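/*
 * Rate-limited variant of the CRYPTO() logging macro: at most `times`
 * messages are written per burst, and the budget is re-armed once more than
 * `limit` seconds have elapsed since the last message that was printed.
 * The same flock()/mutex pairing and log-rotation check as CRYPTO() apply.
 */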
{ struct tm *log_tm_p = (struct tm *)NULL; static unsigned long ulpre = 0; static int is_should_print = 5; if (level > g_kae_log_level) { return; } // cppcheck-suppress * va_list args1 = { 0 }; va_start(args1, fmt); time_t curr = time((time_t *)NULL); if (difftime(curr, ulpre) > limit) { is_should_print = times; } if (is_should_print <= 0) { is_should_print = 0; } if (is_should_print-- > 0) { log_tm_p = (struct tm *)localtime(&curr); flock(g_kae_debug_log_file->_fileno, LOCK_EX); pthread_mutex_lock(&g_debug_file_mutex); fseek(g_kae_debug_log_file, 0, SEEK_END); if (log_tm_p != NULL) { fprintf(g_kae_debug_log_file, "[%4d-%02d-%02d %02d:%02d:%02d][%s][%s:%d:%s()] ", (1900 + log_tm_p->tm_year), (1 + log_tm_p->tm_mon), log_tm_p->tm_mday, // base time 1900 year log_tm_p->tm_hour, log_tm_p->tm_min, log_tm_p->tm_sec, g_log_level[level], __FILE__, __LINE__, __func__); } else { fprintf(g_kae_debug_log_file, "[%s][%s:%d:%s()] ", g_log_level[level], __FILE__, __LINE__, __func__); } vfprintf(g_kae_debug_log_file, fmt, args1); fprintf(g_kae_debug_log_file, "\n"); if (ftell(g_kae_debug_log_file) > KAE_LOG_MAX_SIZE) { kae_save_log(g_kae_debug_log_file); ftruncate(g_kae_debug_log_file->_fileno, 0); fseek(g_kae_debug_log_file, 0, SEEK_SET); } pthread_mutex_unlock(&g_debug_file_mutex); flock(g_kae_debug_log_file->_fileno, LOCK_UN); ulpre = time((time_t *)NULL); } va_end(args1); } static int need_debug(void) { if (g_kae_log_level >= KAE_DEBUG) { return 1; } else { return 0; } } /* * desc: print data for debug * @param name the name of buf * @param buf the buf msg when input * @param len bd len */ void dump_data(const char *name, unsigned char *buf, unsigned int len) { unsigned int i; if (need_debug()) { US_DEBUG("DUMP ==> %s", name); for (i = 0; i + 8 <= len; i += 8) { // buf length:8 US_DEBUG("0x%llx: \t%02x %02x %02x %02x %02x %02x %02x %02x", (unsigned long long)(buf + i), *(buf + i), (*(buf + i + 1)), *(buf + i + 2), *(buf + i + 3), // buf offset:0,1,2,3 *(buf + i + 4), *(buf + i + 5), *(buf + i + 6), *(buf + i + 7)); // buf offset:4,5,6,7 } if (len % 8) { // remainder:divide by 8 US_DEBUG ("0x%llx: \t", (unsigned long long)(buf + i)); for (; i < len; i++) { US_DEBUG("%02x ", buf[i]); } } } } /* * desc: print bd for debug * @param bd the buf msg when input * @param len bd len */ void dump_bd(unsigned int *bd, unsigned int len) { unsigned int i; if (need_debug()) { for (i = 0; i < len; i++) { US_DEBUG("Word[%d] 0x%08x", i, bd[i]); } } } void kae_save_log(FILE *src) { int size = 0; char buf[1024] = {0}; // buf length:1024 if (src == NULL) { return; } FILE *dst = fopen(KAE_DEBUG_FILE_PATH_OLD, "w"); if (dst == NULL) { return; } fseek(src, 0, SEEK_SET); while (1) { size = fread(buf, sizeof(char), 1024, src); // buf length:1024 fwrite(buf, sizeof(char), size, dst); if (!size) { break; } } fclose(dst); } KAE/utils/engine_opensslerr.c0000644060212406010010000001473313616500010013420 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for error module * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include <openssl/err.h> #include "engine_opensslerr.h" #define ERR_FUNC(func) ERR_PACK(0, func, 0) #define ERR_REASON(reason) ERR_PACK(0, 0, reason) static int g_kae_lib_error_code = 0; static int g_kae_error_init = 1; static ERR_STRING_DATA g_kae_str_functs[] = { { ERR_FUNC(KAE_F_HPRE_GET_RSA_METHODS), "hpre_get_RSA_methods" }, { ERR_FUNC(KAE_F_CHANGRSAMETHOD), "changRsaMethod" }, { ERR_FUNC(KAE_F_HPRE_PKEY_METHS), "hpre_pkey_meths" }, { ERR_FUNC(KAE_F_BIND_HELPER), "bind_helper" }, { ERR_FUNC(KAE_F_RSA_FILL_KENGEN_PARAM), "rsa_fill_keygen_param" }, { ERR_FUNC(KAE_F_HPRE_RSA_PUBENC), "hpre_rsa_public_encrypt" }, { ERR_FUNC(KAE_F_HPRE_RSA_PRIENC), "hpre_rsa_private_encrypt" }, { ERR_FUNC(KAE_F_HPRE_RSA_PUBDEC), "hpre_rsa_public_decrypt" }, { ERR_FUNC(KAE_F_HPRE_RSA_PRIDEC), "hpre_rsa_private_decrypt" }, { ERR_FUNC(KAE_F_HPRE_RSA_PRIMEGEN), "hpre_rsa_primegen" }, { ERR_FUNC(KAE_F_HPRE_RSA_KEYGEN), "hpre_rsa_keygen" }, { ERR_FUNC(KAE_F_CHECK_PUBKEY_PARAM), "check_pubkey_param" }, { ERR_FUNC(KAE_F_HPRE_PUBENC_PADDING), "hpre_pubenc_padding" }, { ERR_FUNC(KAE_F_HPRE_PRIENC_PADDING), "hpre_prienc_padding" }, { ERR_FUNC(KAE_F_CHECK_HPRE_PUBDEC_PADDING), "hpre_check_pubdec_padding" }, { ERR_FUNC(KAE_F_CHECK_HPRE_PRIDEC_PADDING), "hpre_check_pridec_padding" }, { ERR_FUNC(KAE_F_DIGEST_SOFT_INIT), "sec_digest_soft_init" }, { 0, (const char *)NULL } }; static ERR_STRING_DATA g_kae_str_reasons[] = { { ERR_REASON(KAE_R_NO_MATCH_DEVICE), "get no match device.check the hw resource" }, { ERR_REASON(KAE_R_MALLOC_FAILURE), "no system memory to alloc" }, { ERR_REASON(KAE_R_HWMEM_MALLOC_FAILURE), "no hardware memory to alloc" }, { ERR_REASON(KAE_R_INPUT_PARAM_ERR), "input param is invaild" }, { ERR_REASON(KAE_R_SET_ID_FAILURE), "kae engine set id failure" }, { ERR_REASON(KAE_R_SET_NAME_FAILURE), "kae engine set name failure" }, { ERR_REASON(KAE_R_SET_PKEYMETH_FAILURE), "kae engine set pkeymeth function failure" }, { ERR_REASON(KAE_R_SET_RSA_FAILURE), "kae engine set rsa failure" }, { ERR_REASON(KAE_R_SET_DESTORY_FAILURE), "kae engine set destory function failure" }, { ERR_REASON(KAE_R_SET_INIT_FAILURE), "kae engine set init function failure" }, { ERR_REASON(KAE_R_SET_CTRL_FAILURE), "kae engine set ctrl function failure" }, { ERR_REASON(KAE_R_SET_CMDDEF_FAILURE), "kae engine set cmd define failure" }, { ERR_REASON(KAE_R_SET_FINISH_FAILURE), "kae engine set finish function failure" }, { ERR_REASON(KAE_R_UNSUPPORT_HARDWARE_TYPE), "unsupported hardware type" }, { ERR_REASON(KAE_R_TIMEOUT), "Operation timeout" }, { ERR_REASON(KAE_R_RSARECV_FAILURE), "RSA receive failure" }, { ERR_REASON(KAE_R_RSARECV_STATE_FAILURE), "RSA received but status is failure" }, { ERR_REASON(KAE_R_RSASEND_FAILURE), "RSA send failure" }, { ERR_REASON(KAE_R_GET_ALLOCED_HWMEM_FAILURE), "get memory from reserve memory failure" }, { ERR_REASON(KAE_R_FREE_ALLOCED_HWMEM_FAILURE), "free memory to reserve memory failure" }, { ERR_REASON(KAE_R_RSA_KEY_NOT_COMPELET), "rsa key param is not compeleted" }, { ERR_REASON(KAE_R_RSA_PADDING_FAILURE), "rsa padding failed" }, { ERR_REASON(KAE_R_DATA_TOO_LARGE_FOR_MODULUS), "data too large for modules" }, { ERR_REASON(KAE_R_DATA_GREATER_THEN_MOD_LEN), "data greater than mod len" }, { ERR_REASON(KAE_R_CHECKPADDING_FAILURE), "check rsa padding failure" }, { ERR_REASON(KAE_R_ERR_LIB_BN), "err in BN operation" }, { ERR_REASON(KAE_R_RSA_KEY_SIZE_TOO_SMALL), "data too small" }, { 
ERR_REASON(KAE_R_MODULE_TOO_LARGE), "data too large" }, { ERR_REASON(KAE_R_INVAILED_E_VALUE), "invailed e value" }, { ERR_REASON(KAE_R_UNKNOW_PADDING_TYPE), "unknow padding type" }, { ERR_REASON(KAE_R_INPUT_FIKE_LENGTH_ZERO), "input file length zero" }, { ERR_REASON(KAE_R_NEW_ENGINE_FAILURE), "get new engine failure" }, { ERR_REASON(KAE_R_BIND_ENGINE_FAILURE), "kae engine bind failure" }, { ERR_REASON(KAE_R_RSA_SET_METHODS_FAILURE), "rsa set kae methods failure" }, { ERR_REASON(KAE_R_PUBLIC_KEY_INVALID), "invalid public key" }, { ERR_REASON(KAE_R_PUBLIC_ENCRYPTO_FAILURE), "rsa public key encrypto failure" }, { ERR_REASON(KAE_R_PUBLIC_DECRYPTO_FAILURE), "rsa public key decrypto failure" }, { ERR_REASON(KAE_R_GET_PRIMEKEY_FAILURE), "rsa prime key generate failure" }, { ERR_REASON(KAE_R_ENGINE_ALREADY_DEFINED), "kae engine already defined, try to use engine id 'kae' instead." }, { 0, (const char *)NULL } }; int err_load_kae_strings(void) { if (g_kae_lib_error_code == 0) { g_kae_lib_error_code = ERR_get_next_error_library(); } if (g_kae_error_init) { g_kae_error_init = 0; ERR_load_strings(g_kae_lib_error_code, g_kae_str_functs); ERR_load_strings(g_kae_lib_error_code, g_kae_str_reasons); } return 1; } void err_unload_kae_strings(void) { if (g_kae_error_init == 0) { ERR_unload_strings(g_kae_lib_error_code, g_kae_str_functs); ERR_unload_strings(g_kae_lib_error_code, g_kae_str_reasons); g_kae_error_init = 1; } } void err_kae_error(int function, int reason, char *engine_file, int line) { if (g_kae_lib_error_code == 0) { g_kae_lib_error_code = ERR_get_next_error_library(); } ERR_PUT_error(g_kae_lib_error_code, function, reason, engine_file, line); } KAE/utils/engine_config.h0000644060212406010010000000202213616500010012462 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides interface of configuration file reading for the KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HISI_ACC_OPENSSL_CONFIG_H #define HISI_ACC_OPENSSL_CONFIG_H #include <stdio.h> #include <stdlib.h> #include <string.h> int kae_drv_get_item(const char *config_file, const char *v_pszSection, const char *v_pszItem, char *v_pszValue); #endif // HISI_ACC_OPENSSL_CONFIG_H KAE/LICENSE0000644060212406010010000002614013616500010007373 0ustar Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.KAE/engine_kae.c0000644060212406010010000002271013616500010010616 0ustar /* * Copyright (c) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for an OpenSSL KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifndef __USE_GNU #define __USE_GNU #endif #include <time.h> #include <sys/types.h> #include <dirent.h> #include "engine_kae.h" #include "engine_check.h" #include "engine_fork.h" #include "engine_utils.h" #include "async_poll.h" #include "sec_ciphers.h" #include "sec_digests.h" #include "hpre_rsa.h" #include "hpre_dh.h" #define KAE_CMD_ENABLE_ASYNC ENGINE_CMD_BASE #define PKEY_METHOD_TYPE_NUM 3 /* Engine id */ const char *g_engine_kae_id = "kae"; /* Engine name */ const char *g_engine_kae_name = "Kunpeng Accelerator Engine"; /* Support pkey method types */ const int g_pkey_method_types[PKEY_METHOD_TYPE_NUM] = {EVP_PKEY_RSA, EVP_PKEY_DH, EVP_PKEY_DHX}; static int g_bind_ref_count = 0; static int hpre_pkey_meths(ENGINE *e, EVP_PKEY_METHOD **pmeth, const int **pnids, int nid); static const ENGINE_CMD_DEFN g_kae_cmd_defns[] = { { KAE_CMD_ENABLE_ASYNC, "KAE_CMD_ENABLE_ASYNC", "Enable or Disable the engine async interface.", ENGINE_CMD_FLAG_NUMERIC}, { 0, NULL, NULL, 0 } }; /****************************************************************************** * function: * kae_engine_ctrl(ENGINE *e, int cmd, long i,void *p, void (*f)(void)) * * @param e [IN] - OpenSSL engine pointer * @param cmd [IN] - Control Command * @param i [IN] - Input Parameters for the command * @param p [IN] - Parameters for the command * @param f [IN] - Callback function * * description: * kae engine control functions. * Note: KAE_CMD_ENABLE_ASYNC should be called at the following * point during startup: * ENGINE_by_id * ---> ENGINE_ctrl_cmd(KAE_CMD_ENABLE_ASYNC) * ENGINE_init ******************************************************************************/ static int kae_engine_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f) (void)) { int ret = 1; (void)p; (void)f; if (unlikely(e == NULL)) { US_ERR("Null Enigne\n"); return 0; } switch (cmd) { case KAE_CMD_ENABLE_ASYNC: US_DEBUG("%s async polling\n", i == 0 ? 
"Disable" : "Enable"); if (i == 0) { kae_disable_async(); } else { kae_enable_async(); } break; default: US_WARN("CTRL command not implemented\n"); ret = 0; break; } return ret; } static int kae_engine_init(ENGINE *e) { UNUSED(e); return 1; } static int kae_engine_destroy(ENGINE *e) { UNUSED(e); if (__sync_sub_and_fetch(&g_bind_ref_count, 1) <= 0) { hpre_destroy(); hpre_dh_destroy(); sec_ciphers_free_ciphers(); sec_digests_free_methods(); err_unload_kae_strings(); kae_debug_close_log(); __sync_and_and_fetch(&g_bind_ref_count, 0); } return 1; } static int kae_engine_finish(ENGINE *e) { UNUSED(e); return 1; } static int kae_engine_setup(void) { if (__sync_add_and_fetch(&g_bind_ref_count, 1) == 1) { kae_debug_init_log(); /* Ensure the kae error handling is set up */ err_load_kae_strings(); if (!cipher_module_init()) { __sync_and_and_fetch(&g_bind_ref_count, 0); return 0; } if (!digest_module_init()) { __sync_and_and_fetch(&g_bind_ref_count, 0); return 0; } if (!hpre_module_init()) { __sync_and_and_fetch(&g_bind_ref_count, 0); return 0; } if (!hpre_module_dh_init()) { __sync_and_and_fetch(&g_bind_ref_count, 0); return 0; } if (!kae_checking_q_thread_init()) { __sync_and_and_fetch(&g_bind_ref_count, 0); return 0; } async_module_init(); pthread_atfork(engine_do_before_fork_handler, engine_init_parent_at_fork_handler, engine_init_child_at_fork_handler); } return 1; } int kae_get_device(const char* dev) { struct dirent *device = NULL; DIR *wd_class = NULL; int found = 0; const char* uacce_path = "/sys/class/uacce"; if (access(uacce_path, 0) != 0 || (dev == NULL)) { US_WARN("WD framework is not enabled on the system!\n"); return 0; } wd_class = opendir(uacce_path); if (wd_class == NULL) { US_WARN("uacce_path cant be opened!\n"); return 0; } while ((device = readdir(wd_class)) != NULL) { if (strstr(device->d_name, dev)) { found = 1; break; } } closedir(wd_class); return found == 1 ? 
1 : 0; } static int hpre_check_meth_args(EVP_PKEY_METHOD **pmeth, const int **pnids, int nid) { if ((pnids == NULL) && ((pmeth == NULL) || (nid < 0))) { KAEerr(KAE_F_HPRE_PKEY_METHS, KAE_R_INPUT_PARAM_ERR); US_ERR("wd_engine_digests invalid input param."); if (pmeth != NULL) { *pmeth = NULL; } return OPENSSL_FAIL; } if (pmeth == NULL && pnids != NULL) { *pnids = g_pkey_method_types; return PKEY_METHOD_TYPE_NUM; } if (pmeth == NULL) { return OPENSSL_FAIL; } return HPRE_CONT; } static int hpre_pkey_meths(ENGINE *e, EVP_PKEY_METHOD **pmeth, const int **pnids, int nid) { UNUSED(e); int ret = hpre_check_meth_args(pmeth, pnids, nid); if (ret != HPRE_CONT) { return ret; } switch (nid) { case EVP_PKEY_RSA: *pmeth = get_rsa_pkey_meth(); break; case EVP_PKEY_DH: *pmeth = get_dh_pkey_meth(); break; case EVP_PKEY_DHX: *pmeth = (EVP_PKEY_METHOD *)EVP_PKEY_meth_find(EVP_PKEY_DHX); break; default: *pmeth = NULL; break; } return (*pmeth != NULL); } /****************************************************************************** * function: * bind_helper(ENGINE *e, * const char *id) * * @param e [IN] - OpenSSL engine pointer * @param id [IN] - engine id * * description: * Connect KAE engine to OpenSSL engine library ******************************************************************************/ static int bind_kae(ENGINE *e, const char *id) { int ret; (void)id; const char *sec_device = "hisi_sec"; static int loaded = 0; if (loaded) { US_ERR("ENGINE defined already!\n"); KAEerr(KAE_F_BIND_HELPER, KAE_R_ENGINE_ALREADY_DEFINED); return 0; } loaded = 1; #undef RETURN_FAIL_IF #define RETURN_FAIL_IF(cond, mesg, f, r) \ if (cond) { \ KAEerr(f, r); \ US_ERR(mesg); \ return 0; \ }\ if (!kae_engine_setup()) { US_ERR("ENGINE setup failed\n"); return 0; } ret = ENGINE_set_id(e, g_engine_kae_id); RETURN_FAIL_IF(ret != 1, "ENGINE_set_id failed.", KAE_F_BIND_HELPER, KAE_R_SET_ID_FAILURE); ret = ENGINE_set_name(e, g_engine_kae_name); RETURN_FAIL_IF(ret != 1, "ENGINE_set_name failed.", KAE_F_BIND_HELPER, KAE_R_SET_NAME_FAILURE); ret = kae_get_device(sec_device); if (ret != 0) { ret = ENGINE_set_ciphers(e, sec_engine_ciphers); RETURN_FAIL_IF(ret != 1, "ENGINE_set_ciphers failed.", KAE_F_BIND_HELPER, KAE_R_SET_CIPHERS_FAILURE); ret = ENGINE_set_digests(e, sec_engine_digests); RETURN_FAIL_IF(ret != 1, "ENGINE_set_digests failed.", KAE_F_BIND_HELPER, KAE_R_SET_DIGESTS_FAILURE); } ret = ENGINE_set_pkey_meths(e, hpre_pkey_meths); RETURN_FAIL_IF(ret != 1, "ENGINE_set_finish_function failed", KAE_F_BIND_HELPER, KAE_R_SET_PKEYMETH_FAILURE); ret = ENGINE_set_RSA(e, hpre_get_rsa_methods()); RETURN_FAIL_IF(ret != 1, "ENGINE_set_RSA failed.", KAE_F_BIND_HELPER, KAE_R_SET_RSA_FAILURE); ret = ENGINE_set_DH(e, hpre_get_dh_methods()); RETURN_FAIL_IF(ret != 1, "ENGINE_set_DH failed.", KAE_F_BIND_HELPER, KAE_R_SET_DH_FAILURE); ret = ENGINE_set_destroy_function(e, kae_engine_destroy); RETURN_FAIL_IF(ret != 1, "ENGINE_set_destroy_function failed.", KAE_F_BIND_HELPER, KAE_R_SET_DESTORY_FAILURE); ret = ENGINE_set_init_function(e, kae_engine_init); RETURN_FAIL_IF(ret != 1, "ENGINE_set_init_function failed.", KAE_F_BIND_HELPER, KAE_R_SET_INIT_FAILURE); ret = ENGINE_set_finish_function(e, kae_engine_finish); RETURN_FAIL_IF(ret != 1, "ENGINE_set_finish_function failed.", KAE_F_BIND_HELPER, KAE_R_SET_FINISH_FAILURE); ret &= ENGINE_set_ctrl_function(e, kae_engine_ctrl); ret &= ENGINE_set_cmd_defns(e, g_kae_cmd_defns); if (ret != 1) { US_ERR("Engine set ctrl function or defines failed\n"); return 0; } return 1; } IMPLEMENT_DYNAMIC_BIND_FN(bind_kae) 
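/*
 * Usage sketch (illustrative only, not part of this engine's build): once
 * libkae is installed under OPENSSL_ENGINES, an application can bind the
 * dynamically loadable engine through the standard OpenSSL ENGINE API.
 * The id "kae" matches g_engine_kae_id above; everything else here is an
 * assumption for demonstration.
 *
 *     #include <openssl/engine.h>
 *
 *     int load_kae_engine(void)
 *     {
 *         ENGINE_load_dynamic();
 *         ENGINE *e = ENGINE_by_id("kae");
 *         if (e == NULL)
 *             return 0;
 *         if (ENGINE_init(e) != 1) {
 *             ENGINE_free(e);
 *             return 0;
 *         }
 *         (void)ENGINE_set_default(e, ENGINE_METHOD_ALL);
 *         ENGINE_finish(e);
 *         ENGINE_free(e);
 *         return 1;
 *     }
 */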
IMPLEMENT_DYNAMIC_CHECK_FN() /*lint -e(10)*/ KAE/configure0000755060212406010010000000624713616500010010303 0ustar #! /bin/bash if [[ $# == 0 ]]; then openssl_path=/usr/local wd_path=/usr/local elif [[ $# == 1 ]]; then if [[ $1 == --openssl_path=* ]]; then openssl_path=${1#*=} wd_path=/usr/local elif [[ $1 == --wd_path=* ]]; then openssl_path=/usr/local wd_path=${1#*=} else echo "usage: ./configure [--openssl_path] [--wd_path=]" exit 1; fi elif [[ $# == 2 ]]; then if [ ${1%%=*} == --openssl_path ] && [ ${2%=*} == --wd_path ]; then openssl_path=${1#*=} wd_path=${2##*=} elif [ ${1%%=*} == --wd_path ] && [ ${2%=*} == --openssl_path ]; then wd_path=${1#*=} openssl_path=${2##*=} else echo "usage: ./configure [--openssl_path=] [--wd_path=]" exit 1; fi else echo "Too many options, option numbers less than or equal 2" exit 1; fi if [ -d $openssl_path ]; then echo "checking openssl install path... ok" else echo "No such file or directory" exit 1; fi if [ -d $wd_path ]; then echo "checking wd install path... ok" else echo "No such file or directory" exit 1; fi grep -n "OPENSSL_WORK_PATH" Makefile > /dev/null if [ $? = 0 ]; then sed -i "1d" Makefile sed -i "1i\export OPENSSL_WORK_PATH=$openssl_path" Makefile else sed -i "1i\export OPENSSL_WORK_PATH=$openssl_path" Makefile fi grep -n "LDFLAGS += -Wl,-rpath*" Makefile > /dev/null if [ $? = 0 ]; then line=($(grep -n "LDFLAGS += -Wl,-rpath*" Makefile | cut -d ':' -f 1)) sed -i "${line}d" Makefile sed -i "${line}i\LDFLAGS += -Wl,-rpath,$openssl_path/lib:$wd_path/lib" Makefile else line=($(grep -n "LDFLAGS += -Wl,-z,relro,-z,now,-z,noexecstack" Makefile | cut -d ':' -f 1)) sed -i "${line}a\LDFLAGS += -Wl,-rpath,$openssl_path/lib:$wd_path/lib" Makefile fi WD_PATH=./../drivers/warpdrive WD_SOURCE_PATH=/usr/local/include/warpdrive WD_RPM_PATH=/usr/include/warpdrive grep -n "INCDIR += -I ${WD_PATH} -I ${WD_PATH}/include" Makefile > /dev/null if [ $? = 0 ]; then include_line=($(grep -n "INCDIR += -I ${WD_PATH} -I ${WD_PATH}/include" Makefile | cut -d ':' -f 1)) sed -i "${include_line}d" Makefile fi grep -n "INCDIR += -I ${WD_RPM_PATH} -I ${WD_RPM_PATH}/include" Makefile > /dev/null if [ $? = 0 ]; then include_line=($(grep -n "INCDIR += -I ${WD_RPM_PATH} -I ${WD_RPM_PATH}/include" Makefile | cut -d ':' -f 1)) sed -i "${include_line}d" Makefile fi grep -n "INCDIR += -I ${WD_SOURCE_PATH} -I ${WD_SOURCE_PATH}/include" Makefile > /dev/null if [ $? = 0 ]; then include_line=($(grep -n "INCDIR += -I ${WD_SOURCE_PATH} -I ${WD_SOURCE_PATH}/include" Makefile | cut -d ':' -f 1)) sed -i "${include_line}d" Makefile fi include_line=($(grep -n "INCDIR += -I \$(OPENSSL_WORK_PATH)/include" Makefile | cut -d ':' -f 1)) if [ -f ${WD_PATH}/wd.h ]; then sed -i "${include_line}a\INCDIR += -I ${WD_PATH} -I ${WD_PATH}/include" Makefile elif [ -f ${WD_RPM_PATH}/wd.h ]; then sed -i "${include_line}a\INCDIR += -I ${WD_RPM_PATH} -I ${WD_RPM_PATH}/include" Makefile elif [ -f ${WD_SOURCE_PATH}/wd.h ]; then sed -i "${include_line}a\INCDIR += -I ${WD_SOURCE_PATH} -I ${WD_SOURCE_PATH}/include" Makefile else echo "Warpdrive have not installed!" 
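    # Usage sketch (the paths below are examples, not requirements):
    #   ./configure --openssl_path=/usr/local --wd_path=/usr/local
    # The script edits the Makefile in place: it sets OPENSSL_WORK_PATH,
    # rewrites the -Wl,-rpath line to point at the chosen openssl/warpdrive
    # lib directories, and selects whichever warpdrive include directory
    # actually contains wd.h.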
fi KAE/README.md0000644060212406010010000001727213616500010007653 0ustar # Kunpeng Acceleration Engine - [Introduction](#introduction) - [License](#license) - [Requirements](#requirements) - [Installation Instructions](#installation-instructions) - [Building OpenSSL](#building-openssl) - [Cloning and Building Kunpeng Acceleration Engine](#cloning-and-building-kunpeng-acceleration-engine) - [Testing Kunpeng Accelerator Engine](#testing-kunpeng-accelerator-engine) - [Examples](#examples) - [Troubleshooting](#troubleshooting) - [Loading Engines by Setting the OpenSSL Configuration File](#loading-engines-by-setting-the-openssl-configuration-file) - [More Information](#more-information) - [Copyright](#copyright) ## Introduction Kunpeng Acceleration Engine is a new technology within Hisilicon Kunpeng 920 processors which provides a hardware-enabled foundation for security, authentication, and compression. It significantly increases the performance across cloud, networking, big data, and storage applications and platforms. Kunpeng Acceleration Engine includes symmetric encryption, asymmetric encryption, digital signatures, and RSA for accelerating SSL/TLS application, which makes processors more efficient and reduces hardware costs. By accelerating SSL/TLS with Kunpeng Acceleration Engine, you can: - Support higher-performance secured tunnels and a greater number of authenticated clients - Have higher-performance encrypted traffic throughout a secured network - Accelerate compute-intense symmetric and asymmetric cryptography - Have greater platform application efficiency - Have higher-performance compression and decompression - Maximize CPU utilization So far, the algorithms supported by Kunpeng Acceleration Engine are: - Asymmetric encryption algorithm: RSA Support Key Sizes 1024/2048/3072/4096 - Digest algorithm: SM3 - Block cipher algorithm: SM4 Support CTR/XTS/CBC - Block cipher algorithm: AES Support CTR/XTS/CBC/ECB - Key exchange algorithm: DH Support 768bit/1024bit/1536bit/2048bit/3072bit/4096bit ## License It is licensed under the [APACHE LICENSE, VERSION 2.0](https://www.apache.org/licenses/LICENSE-2.0 ). For more information, see the LICENSE file. ## Requirements * CPU: Kunpeng 920 * Operating System: * CentOS 7.6 4.14.0-115.el7a.0.1.aarch64 version * SuSE 15.1 4.12.14-195-default arch64 version * NeoKylin 7.6 4.14.0-115.5.1.el7a.06.aarch64 version * EulerOS 2.8 4.19.36-vhulk1907.1.0.h410.eulerosv2r8.aarch64 version * OpenSSL 1.1.1a or later OpenSSL ## Installation Instructions ### Building OpenSSL Clone OpenSSL from Github at the following location: git clone https://github.com/openssl/openssl.git You are advised to check out and build the OpenSSL 1.1.1a git tag specified in the release notes. Versions of OpenSSL before OpenSSL 1.1.0 are not supported. Note: You are not advised to install the accelerated version of OpenSSL as your default system library. Otherwise, acceleration may be used unexpectedly by other applications on the system, resulting in undesired/unsupported behavior. The `--prefix` can be used with the `./config` command to specify the location that `make install` will copy files to. Please see the OpenSSL INSTALL file for full details on usage of the `--prefix` option. By default, we usually install OpenSSL as follows: ./config -Wl,-rpath=/usr/local/lib make make install The `-Wl,-rpath` optioncan specify the openssl shared libraries where the binaries will link to. 
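If you prefer a self-contained OpenSSL build, a minimal sketch is shown below; the prefix `/usr/local/openssl` is only an example, not a path required by this engine:

```
./config --prefix=/usr/local/openssl -Wl,-rpath=/usr/local/openssl/lib
make
make install
```
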
### Cloning and Building Kunpeng Acceleration Engine Clone the Github repository containing the Kunpeng Accelerator Engine: git clone https://github.com/kunpengcompute/KAE Download the release version of Kunpeng Accelerator Engine Driver from: <https://github.com/kunpengcompute/KAEdriver> Firstly, build and install the accelerator driver: Note: To build the Kunpeng Accelerator Engine Driver, install the `kernel-devel` package first. ``` tar -zxf Kunpeng_KAE_driver.tar.gz cd kae_driver make make install modprobe uacce modprobe hisi_qm modprobe hisi_sec2 modprobe hisi_hpre modprobe hisi_zip modprobe hisi_rde ``` Secondly, install the accelerator library: ``` cd warpdrive sh autogen.sh ./configure make make install ``` Then, install the Kunpeng Accelerator Engine: ``` cd KAE chmod +x configure ./configure make make install ``` Note: The `--openssl_path` can be used with the `./configure` command to specify the location that `make install` will copy files to. The default installation path for the accelerator engine is `/usr/local/lib/openssl-1.1`. You are advised to install the Kunpeng Accelerator Engine by default as the OpenSSL; Export the environment variableas `OPENSSL_ENGINES` as follows : ``` export OPENSSL_ENGINES=/usr/local/lib/engines-1.1 ``` ### Testing Kunpeng Accelerator Engine Run the following command to check when the Kunpeng Accelerator Engine is loaded correctly: ``` cd /usr/local/bin/ ./openssl genrsa -out test.key -engine kae 2048 ./openssl rsa -in test.key -pubout -out test_pub.key -engine kae ./openssl rsautl -encrypt -in rsa_test -inkey test_pub.key -pubin -out rsa_test.en -engine kae ./openssl rsautl -decrypt -in rsa_test.en -inkey test.key -out rsa_test.de -engine kae ``` ``` ./openssl enc -sm4-cbc -a -in sm4_test -out sm4_test.en -pass pass:123456 -engine kae ./openssl enc -sm4-cbc -a -in sm4_test -out sm4_test.en -pass pass:123456 -p -engine kae ./openssl enc -sm4-cbc -d -a -in sm4_test.en -out sm4_test.de -pass pass:123456 -engine kae ./openssl sm3 -out sm3_out -engine kae sm3_test ``` ## Examples Here is an example to show you how to use the Kunpeng Accelerator Engine. ``` #include <stdio.h> #include <stdlib.h> /* OpenSSL headers */ #include <openssl/bio.h> #include <openssl/ssl.h> #include <openssl/err.h> #include <openssl/engine.h> int main(int argc, char **argv) { /* Initializing OpenSSL */ SSL_load_error_strings(); ERR_load_BIO_strings(); OpenSSL_add_all_algorithms(); /*You can use ENGINE_by_id Function to get the handle of the Kunpeng Accelerator Engine*/ ENGINE *e = ENGINE_by_id("kae"); ENGINE_init(e); /*The user code To Do */ ... ENGINE_free(e); } ``` ## Troubleshooting The most likely failure point is that the Kunpeng Accelerator Engine is not loaded successfully. If this occurs: 1. Check that the accelerator driver has been loaded successfully by running the `lsmod` command. `uacce.ko, hisi_qm.ko, sgl.ko, hisi_sec2.ko, hisi_hpre.ko, hisi_zip.ko` should be in the list. 2. Check that the paths have been set correctly so that the `libkae.so` engine file can be copied to the correct location. 3. Check that the installation path has been correctly added to the environment variable `OPENSSL_ENGINES` and exported to the shell by running the `export` command. ## Loading Engines by Setting the OpenSSL Configuration File By setting up the OpenSSL configuration file, you can also initialize the Kunpeng Accelerator Engine for your OpenSSL application. 
For further details on using the `openssl.cnf` file, see the OpenSSL online documentation at: <https://www.openssl.org/docs/man1.1.0/apps/config.html> Here is an example to show you how to set up the `openssl.cnf` file to load engines. Add the following statements to the global section (assuming that the path is the one that KAE installed): openssl_conf = openssl_engine_init [ openssl_engine_init ] engines = engine_section [ engine_section ] kae = kae_section [ kae_section ] engine_id = kae dynamic_path = /usr/local/lib/engines-1.1/kae.so ## More Information For further assistance, contact Huawei Support at: <https://support.huawei.com> <https://www.huaweicloud.com/kunpeng/software/accelerator.html> ## Copyright Copyright © 2018 Huawei Corporation. All rights reserved. KAE/alg/0000755060212406010010000000000013616500010007126 5ustar KAE/alg/digests/0000755060212406010010000000000013616500010010570 5ustar KAE/alg/digests/sec_digests.c0000644060212406010010000004127713616500010013243 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine digests * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sec_digests.h" #include "sec_digests_soft.h" #include "sec_digests_wd.h" #include "engine_check.h" #include "engine_utils.h" #include "engine_types.h" #include "engine_log.h" #include "async_callback.h" #include "async_event.h" #include "async_task_queue.h" struct digest_info { int nid; EVP_MD *digest; }; static struct digest_info g_sec_digests_info[] = { { NID_sm3, NULL }, }; #define DIGESTS_COUNT (BLOCKSIZES_OF(g_sec_digests_info)) static int g_known_digest_nids[DIGESTS_COUNT] = { NID_sm3, }; #define SEC_DIGESTS_RETURN_FAIL_IF(cond, mesg, ret) \ if (unlikely(cond)) {\ US_ERR(mesg); \ return (ret); \ }\ static int sec_digests_init(EVP_MD_CTX *ctx); static int sec_digests_update(EVP_MD_CTX *ctx, const void *data, size_t data_len); static int sec_digests_final(EVP_MD_CTX *ctx, unsigned char *digest); static int sec_digests_cleanup(EVP_MD_CTX *ctx); static int sec_digests_dowork(sec_digest_priv_t *md_ctx); static int sec_digests_sync_dowork(sec_digest_priv_t *md_ctx); static int sec_digests_async_dowork(sec_digest_priv_t *md_ctx, op_done_t *op_done); static void sec_digests_get_alg(sec_digest_priv_t *md_ctx) { switch (md_ctx->e_nid) { case NID_sm3: md_ctx->d_alg = WCRYPTO_SM3; md_ctx->out_len = SM3_LEN; break; default: US_WARN("nid=%d don't support by sec engine.", md_ctx->e_nid); break; } } int sec_digests_init(EVP_MD_CTX *ctx) { sec_digest_priv_t *md_ctx = NULL; if (unlikely(ctx == NULL)) { return OPENSSL_FAIL; } md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); if (unlikely(md_ctx == NULL)) { return OPENSSL_FAIL; } memset((void *)md_ctx, 0, sizeof(sec_digest_priv_t)); int nid = EVP_MD_nid(EVP_MD_CTX_md(ctx)); md_ctx->e_nid = nid; sec_digests_get_alg(md_ctx); md_ctx->state = SEC_DIGEST_INIT; return OPENSSL_SUCCESS; } static int sec_digests_update_inner(sec_digest_priv_t *md_ctx, size_t data_len, const void 
*data) { int ret = OPENSSL_FAIL; size_t left_len = data_len; const unsigned char* tmpdata = (const unsigned char *)data; while (md_ctx->last_update_bufflen + left_len > INPUT_CACHE_SIZE) { int copy_to_bufflen = INPUT_CACHE_SIZE - md_ctx->last_update_bufflen; kae_memcpy(md_ctx->last_update_buff + md_ctx->last_update_bufflen, tmpdata, copy_to_bufflen); md_ctx->last_update_bufflen = INPUT_CACHE_SIZE; left_len -= copy_to_bufflen; tmpdata += copy_to_bufflen; if (md_ctx->state == SEC_DIGEST_INIT) { md_ctx->state = SEC_DIGEST_FIRST_UPDATING; } else if (md_ctx->state == SEC_DIGEST_FIRST_UPDATING) { md_ctx->state = SEC_DIGEST_DOING; } else { (void)md_ctx->state; } ret = sec_digests_sync_dowork(md_ctx); if (ret != KAE_SUCCESS) { US_WARN("do sec digest failed, switch to soft digest"); goto do_soft_digest; } md_ctx->last_update_bufflen = 0; if (left_len <= INPUT_CACHE_SIZE) { md_ctx->last_update_bufflen = left_len; kae_memcpy(md_ctx->last_update_buff, tmpdata, md_ctx->last_update_bufflen); break; } } return OPENSSL_SUCCESS; do_soft_digest: if (md_ctx->state == SEC_DIGEST_FIRST_UPDATING && md_ctx->last_update_buff && md_ctx->last_update_bufflen != 0) { md_ctx->switch_flag = 1; sec_digests_soft_init(md_ctx->soft_ctx, md_ctx->e_nid); ret = sec_digests_soft_update(md_ctx->soft_ctx, md_ctx->last_update_buff, md_ctx->last_update_bufflen, md_ctx->e_nid); ret &= sec_digests_soft_update(md_ctx->soft_ctx, tmpdata, left_len, md_ctx->e_nid); return ret; } else { US_ERR("do sec digest failed"); return OPENSSL_FAIL; } } static int sec_digests_update(EVP_MD_CTX *ctx, const void *data, size_t data_len) { SEC_DIGESTS_RETURN_FAIL_IF(unlikely(!ctx || !data), "ctx is NULL.", OPENSSL_FAIL); sec_digest_priv_t *md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); SEC_DIGESTS_RETURN_FAIL_IF(unlikely(md_ctx == NULL), "md_ctx is NULL.", OPENSSL_FAIL); if (md_ctx->soft_ctx == NULL) { md_ctx->soft_ctx = EVP_MD_CTX_new(); } if (md_ctx->switch_flag) { return sec_digests_soft_update(md_ctx->soft_ctx, data, data_len, md_ctx->e_nid); } if (md_ctx->last_update_buff == NULL) { md_ctx->last_update_buff = (unsigned char *)kae_malloc(INPUT_CACHE_SIZE); if (md_ctx->last_update_buff == NULL) { US_WARN("NO MEM to alloc ctx->in"); return OPENSSL_FAIL; } } int nid = EVP_MD_nid(EVP_MD_CTX_md(ctx)); md_ctx->e_nid = nid; sec_digests_get_alg(md_ctx); unsigned char digest[MAX_OUTLEN] = {0}; md_ctx->out = digest; if (md_ctx->last_update_bufflen + data_len <= INPUT_CACHE_SIZE) { kae_memcpy(md_ctx->last_update_buff + md_ctx->last_update_bufflen, data, data_len); md_ctx->last_update_bufflen += data_len; return OPENSSL_SUCCESS; } return sec_digests_update_inner(md_ctx, data_len, data); } static int sec_digests_final(EVP_MD_CTX *ctx, unsigned char *digest) { int ret = KAE_FAIL; SEC_DIGESTS_RETURN_FAIL_IF(!ctx || !digest, "ctx is NULL.", OPENSSL_FAIL); sec_digest_priv_t *md_ctx = (sec_digest_priv_t *)EVP_MD_CTX_md_data(ctx); SEC_DIGESTS_RETURN_FAIL_IF(unlikely(md_ctx == NULL), "md_ctx is NULL.", OPENSSL_FAIL); if (md_ctx->switch_flag) { ret = sec_digests_soft_final(md_ctx->soft_ctx, digest, md_ctx->e_nid); goto end; } if (md_ctx->last_update_bufflen == 0) { US_WARN("no data input, swich to soft digest"); goto do_soft_digest; } if (md_ctx->last_update_buff && md_ctx->last_update_bufflen != 0) { if (md_ctx->state == SEC_DIGEST_INIT && md_ctx->last_update_bufflen < MIN_DIGEST_LEN) { US_WARN_LIMIT("small package offload, switch to soft digest"); goto do_soft_digest; } uint32_t tmp = md_ctx->state; md_ctx->state = SEC_DIGEST_FINAL; md_ctx->out = digest; ret 
= sec_digests_dowork(md_ctx); if (ret != KAE_SUCCESS) { US_WARN("do sec digest failed, switch to soft digest"); md_ctx->state = tmp; goto do_soft_digest; } ret = OPENSSL_SUCCESS; } US_DEBUG("do digest success. ctx=%p", md_ctx); end: sec_digests_soft_cleanup(md_ctx); if (md_ctx->e_digest_ctx != NULL) { (void)wd_digests_put_engine_ctx(md_ctx->e_digest_ctx); md_ctx->e_digest_ctx = NULL; } return ret; do_soft_digest: if (md_ctx->state == SEC_DIGEST_INIT) { sec_digests_soft_work(md_ctx, md_ctx->last_update_bufflen, digest); ret = OPENSSL_SUCCESS; } else { US_ERR("do sec digest failed"); ret = OPENSSL_FAIL; } if (md_ctx->e_digest_ctx != NULL) { (void)wd_digests_put_engine_ctx(md_ctx->e_digest_ctx); md_ctx->e_digest_ctx = NULL; } return ret; } static void sec_digests_update_md_ctx(sec_digest_priv_t* md_ctx) { if (md_ctx->do_digest_len == 0) { return; } md_ctx->in += md_ctx->do_digest_len; } static int sec_digests_dowork(sec_digest_priv_t *md_ctx) { int ret = KAE_FAIL; // add async parm int job_ret; op_done_t op_done; SEC_DIGESTS_RETURN_FAIL_IF(md_ctx->last_update_bufflen <= 0, "in length less than or equal to zero.", KAE_FAIL); // packageSize>input_cache_size if (md_ctx->last_update_bufflen > INPUT_CACHE_SIZE) { ret = sec_digests_sync_dowork(md_ctx); if (ret != 0) { US_ERR("sec digest sync fail"); return ret; } return KAE_SUCCESS; } // async async_init_op_done(&op_done); if (op_done.job != NULL && kae_is_async_enabled()) { if (async_setup_async_event_notification(0) == 0) { US_ERR("sec async event notifying failed"); async_cleanup_op_done(&op_done); return KAE_FAIL; } } else { US_DEBUG("NO ASYNC Job or async disable, back to SYNC!"); async_cleanup_op_done(&op_done); return sec_digests_sync_dowork(md_ctx); } if (sec_digests_async_dowork(md_ctx, &op_done) == KAE_FAIL) goto err; do { job_ret = async_pause_job(op_done.job, ASYNC_STATUS_OK); if ((job_ret == 0)) { US_DEBUG("- pthread_yidle -"); kae_pthread_yield(); } } while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); if (op_done.verifyRst < 0) { US_ERR("verify result failed with %d", op_done.verifyRst); async_cleanup_op_done(&op_done); return KAE_FAIL; } async_cleanup_op_done(&op_done); US_DEBUG(" Digest Async Job Finish! 
md_ctx = %p\n", md_ctx); return KAE_SUCCESS; err: US_ERR("async job err"); (void)async_clear_async_event_notification(); async_cleanup_op_done(&op_done); return KAE_FAIL; } static int sec_digests_sync_dowork(sec_digest_priv_t *md_ctx) { SEC_DIGESTS_RETURN_FAIL_IF(md_ctx == NULL, "md_ctx is NULL.", KAE_FAIL); if (md_ctx->e_digest_ctx == NULL) { md_ctx->e_digest_ctx = wd_digests_get_engine_ctx(md_ctx); if (md_ctx->e_digest_ctx == NULL) { US_WARN("failed to get engine ctx, switch to soft digest"); return KAE_FAIL; } } digest_engine_ctx_t *e_digest_ctx = md_ctx->e_digest_ctx; md_ctx->in = md_ctx->last_update_buff; uint32_t leftlen = md_ctx->last_update_bufflen; while (leftlen != 0) { md_ctx->do_digest_len = wd_digests_get_do_digest_len(e_digest_ctx, leftlen); wd_digests_set_input_data(e_digest_ctx); int ret = wd_digests_doimpl(e_digest_ctx); if (ret != KAE_SUCCESS) { return ret; } wd_digests_get_output_data(e_digest_ctx); sec_digests_update_md_ctx(md_ctx); leftlen -= md_ctx->do_digest_len; } US_DEBUG("sec do digest success."); return KAE_SUCCESS; } static int sec_digests_async_dowork(sec_digest_priv_t *md_ctx, op_done_t *op_done) { int ret = 0; int cnt = 0; enum task_type type = ASYNC_TASK_DIGEST; SEC_DIGESTS_RETURN_FAIL_IF(md_ctx == NULL, "md_ctx is NULL.", KAE_FAIL); if (md_ctx->e_digest_ctx == NULL) { md_ctx->e_digest_ctx = wd_digests_get_engine_ctx(md_ctx); if (md_ctx->e_digest_ctx == NULL) { US_WARN("failed to get engine ctx, switch to soft digest"); return KAE_FAIL; } } digest_engine_ctx_t *e_digest_ctx = md_ctx->e_digest_ctx; SEC_DIGESTS_RETURN_FAIL_IF(e_digest_ctx == NULL, "e_digest_ctx is NULL", KAE_FAIL); void *tag = e_digest_ctx; md_ctx->in = md_ctx->last_update_buff; uint32_t leftlen = md_ctx->last_update_bufflen; md_ctx->do_digest_len = wd_digests_get_do_digest_len(e_digest_ctx, leftlen); wd_digests_set_input_data(e_digest_ctx); do { if (cnt > MAX_SEND_TRY_CNTS) { break; } ret = wcrypto_do_digest(e_digest_ctx->wd_ctx, &e_digest_ctx->op_data, tag); if (ret == -WD_EBUSY) { if ((async_wake_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || async_pause_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0)) { US_ERR("sec wake job or sec pause job fail!\n"); ret = 0; break; } cnt++; } } while (ret == -WD_EBUSY); if (ret != WD_SUCCESS) { US_ERR("sec async wcryto do cipher failed"); return KAE_FAIL; } if (async_add_poll_task(e_digest_ctx, op_done, type) == 0) { US_ERR("sec add task failed "); return KAE_FAIL; } return KAE_SUCCESS; } static int sec_digests_cleanup(EVP_MD_CTX *ctx) { return OPENSSL_SUCCESS; } /** * desc:bind digest func as hardware function * @return */ static EVP_MD *sec_set_digests_methods(struct digest_info digestinfo) { const EVP_MD *default_digest = NULL; if (digestinfo.digest == NULL) { switch (digestinfo.nid) { case NID_sm3: default_digest = EVP_sm3(); break; default: return NULL; } } digestinfo.digest = (EVP_MD *)EVP_MD_meth_dup(default_digest); if (digestinfo.digest == NULL) { US_ERR("dup digest failed!"); return NULL; } EVP_MD_meth_set_init(digestinfo.digest, sec_digests_init); EVP_MD_meth_set_update(digestinfo.digest, sec_digests_update); EVP_MD_meth_set_final(digestinfo.digest, sec_digests_final); EVP_MD_meth_set_cleanup(digestinfo.digest, sec_digests_cleanup); EVP_MD_meth_set_app_datasize(digestinfo.digest, sizeof(sec_digest_priv_t)); return digestinfo.digest; } static void sec_create_digests(void) { unsigned int i = 0; for (i = 0; i < DIGESTS_COUNT; i++) { if (g_sec_digests_info[i].digest == NULL) { g_sec_digests_info[i].digest = 
sec_set_digests_methods(g_sec_digests_info[i]); } } } /****************************************************************************** * function: * sec_engine_digests(ENGINE *e, * const EVP_digest **digest, * const int **nids, * int nid) * * @param e [IN] - OpenSSL engine pointer * @param digest [IN] - digest structure pointer * @param nids [IN] - digest function nids * @param nid [IN] - digest operation id * * description: * kae engine digest operations registrar ******************************************************************************/ int sec_engine_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid) { UNUSED(e); unsigned int i = 0; if ((nids == NULL) && ((digest == NULL) || (nid < 0))) { US_ERR("sec_engine_digests invalid input param."); if (digest != NULL) { *digest = NULL; } return OPENSSL_FAIL; } /* No specific digest => return a list of supported nids ... */ /* No specific digest => return a list of supported nids ... */ if (digest == NULL) { if (nids != NULL) { *nids = g_known_digest_nids;; } return BLOCKSIZES_OF(g_sec_digests_info); } for (i = 0; i < DIGESTS_COUNT; i++) { if (g_sec_digests_info[i].nid == nid) { if (g_sec_digests_info[i].digest == NULL) { sec_create_digests(); } *digest = g_sec_digests_info[i].digest; return OPENSSL_SUCCESS; } } US_WARN("nid = %d not support.", nid); *digest = NULL; return OPENSSL_FAIL; } void sec_digests_free_methods(void) { unsigned int i = 0; for (i = 0; i < DIGESTS_COUNT; i++) { if (g_sec_digests_info[i].digest != NULL) { EVP_MD_meth_free(g_sec_digests_info[i].digest); g_sec_digests_info[i].digest = NULL; } } } void sec_digests_cb(const void *msg, void *tag) { if (!msg || !tag) { US_ERR("sec cb params err!\n"); return; } struct wcrypto_digest_msg *message = (struct wcrypto_digest_msg *)msg; digest_engine_ctx_t *e_digest_ctx = (digest_engine_ctx_t *)tag; kae_memcpy(e_digest_ctx->md_ctx->out, message->out, message->out_bytes); } // async poll thread create int sec_digest_engine_ctx_poll(void *engnine_ctx) { int ret = 0; digest_engine_ctx_t *e_digest_ctx = (digest_engine_ctx_t *)engnine_ctx; struct wd_queue *q = e_digest_ctx->q_node->kae_wd_queue; POLL_AGAIN: ret = wcrypto_digest_poll(q, 1); if (!ret) { goto POLL_AGAIN; } else if (ret < 0) { US_ERR("digest poll failed\n"); return ret; } return ret; } int digest_module_init(void) { wd_digests_init_qnode_pool(); sec_create_digests(); // reg async interface here async_register_poll_fn(ASYNC_TASK_DIGEST, sec_digest_engine_ctx_poll); return 1; } KAE/alg/digests/sec_digests_wd.c0000644060212406010010000001761713616500010013736 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine utils dealing with wrapdrive * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "sec_digests_wd.h" #include "wd_queue_memory.h" #include "engine_utils.h" #include "engine_types.h" #include "engine_log.h" static KAE_QUEUE_POOL_HEAD_S* g_sec_digests_qnode_pool = NULL; static digest_engine_ctx_t* wd_digests_new_engine_ctx(KAE_QUEUE_DATA_NODE_S* q_node, sec_digest_priv_t* md_ctx); static int wd_digests_init_engine_ctx(digest_engine_ctx_t *e_digest_ctx); void wd_digests_free_engine_ctx(void* digest_ctx) { digest_engine_ctx_t* e_digest_ctx = (digest_engine_ctx_t *)digest_ctx; if (e_digest_ctx == NULL) { return; } if (e_digest_ctx->op_data.in && e_digest_ctx->setup.br.usr) { e_digest_ctx->setup.br.free(e_digest_ctx->setup.br.usr, (void *)e_digest_ctx->op_data.in); e_digest_ctx->op_data.in = NULL; } if (e_digest_ctx->op_data.out && e_digest_ctx->setup.br.usr) { e_digest_ctx->setup.br.free(e_digest_ctx->setup.br.usr, (void *)e_digest_ctx->op_data.out); e_digest_ctx->op_data.out = NULL; } OPENSSL_free(e_digest_ctx); e_digest_ctx = NULL; return; } static digest_engine_ctx_t* wd_digests_new_engine_ctx(KAE_QUEUE_DATA_NODE_S* q_node, sec_digest_priv_t* md_ctx) { digest_engine_ctx_t *e_digest_ctx = NULL; e_digest_ctx = (digest_engine_ctx_t *)OPENSSL_malloc(sizeof(digest_engine_ctx_t)); if (e_digest_ctx == NULL) { US_ERR("digest engine_ctx malloc fail."); return NULL; } kae_memset(e_digest_ctx, 0, sizeof(digest_engine_ctx_t)); e_digest_ctx->setup.br.alloc = kae_wd_alloc_blk; e_digest_ctx->setup.br.free = kae_wd_free_blk; e_digest_ctx->setup.br.iova_map = kae_dma_map; e_digest_ctx->setup.br.iova_unmap = kae_dma_unmap; e_digest_ctx->setup.br.usr = q_node->kae_queue_mem_pool; e_digest_ctx->op_data.in = e_digest_ctx->setup.br.alloc(e_digest_ctx->setup.br.usr, DIGEST_BLOCK_SIZE); if (e_digest_ctx->op_data.in == NULL) { US_ERR("alloc opdata in buf failed"); goto err; } e_digest_ctx->op_data.out = e_digest_ctx->setup.br.alloc(e_digest_ctx->setup.br.usr, DIGEST_BLOCK_SIZE); if (e_digest_ctx->op_data.out == NULL) { US_ERR("alloc opdata out buf failed"); goto err; } e_digest_ctx->md_ctx = md_ctx; // point to each other e_digest_ctx->q_node = q_node; // point to each other q_node->engine_ctx = e_digest_ctx; // point to each other return e_digest_ctx; err: wd_digests_free_engine_ctx(e_digest_ctx); return NULL; } static int wd_digests_init_engine_ctx(digest_engine_ctx_t *e_digest_ctx) { struct wd_queue *q = e_digest_ctx->q_node->kae_wd_queue; sec_digest_priv_t* md_ctx = e_digest_ctx->md_ctx; if (e_digest_ctx->wd_ctx != NULL) { US_WARN("wd ctx is in used by other digests"); return KAE_FAIL; } e_digest_ctx->setup.alg = (enum wcrypto_digest_alg)md_ctx->d_alg; // for example: WD_SM3; e_digest_ctx->setup.mode = WCRYPTO_DIGEST_NORMAL; e_digest_ctx->setup.cb = (wcrypto_cb)sec_digests_cb; e_digest_ctx->wd_ctx = wcrypto_create_digest_ctx(q, &e_digest_ctx->setup); if (e_digest_ctx->wd_ctx == NULL) { US_ERR("wd create sec digest ctx fail!"); return KAE_FAIL; } return KAE_SUCCESS; } digest_engine_ctx_t* wd_digests_get_engine_ctx(sec_digest_priv_t* md_ctx) { KAE_QUEUE_DATA_NODE_S *q_node = NULL; digest_engine_ctx_t *e_digest_ctx = NULL; if (unlikely(md_ctx == NULL)) { US_WARN("sec digest priv ctx NULL!"); return NULL; } q_node = kae_get_node_from_pool(g_sec_digests_qnode_pool); if (q_node == NULL) { US_ERR_LIMIT("failed to get hardware queue"); return NULL; } e_digest_ctx = (digest_engine_ctx_t *)q_node->engine_ctx; if (e_digest_ctx == NULL) { e_digest_ctx = wd_digests_new_engine_ctx(q_node, md_ctx); if (e_digest_ctx == NULL) { US_WARN("sec new engine ctx fail!"); 
(void)kae_put_node_to_pool(g_sec_digests_qnode_pool, q_node); return NULL; } } e_digest_ctx->md_ctx = md_ctx; md_ctx->e_digest_ctx = e_digest_ctx; if (wd_digests_init_engine_ctx(e_digest_ctx) == KAE_FAIL) { US_WARN("init engine ctx fail!"); wd_digests_put_engine_ctx(e_digest_ctx); return NULL; } return e_digest_ctx; } void wd_digests_put_engine_ctx(digest_engine_ctx_t* e_digest_ctx) { if (unlikely(e_digest_ctx == NULL)) { US_WARN("sec digest engine ctx NULL!"); return; } if (e_digest_ctx->md_ctx->last_update_buff != NULL) { kae_free(e_digest_ctx->md_ctx->last_update_buff); } if (e_digest_ctx->wd_ctx != NULL) { wcrypto_del_digest_ctx(e_digest_ctx->wd_ctx); e_digest_ctx->wd_ctx = NULL; } if (e_digest_ctx->q_node != NULL) { (void)kae_put_node_to_pool(g_sec_digests_qnode_pool, e_digest_ctx->q_node); } e_digest_ctx = NULL; return; } int wd_digests_doimpl(digest_engine_ctx_t *e_digest_ctx) { int ret; int trycount = 0; if (unlikely(e_digest_ctx == NULL)) { US_ERR("do digest ctx NULL!"); return KAE_FAIL; } again: ret = wcrypto_do_digest(e_digest_ctx->wd_ctx, &e_digest_ctx->op_data, NULL); if (ret != WD_SUCCESS) { if (ret == WD_EBUSY && trycount <= 5) { // try 5 times US_WARN("do digest busy, retry again!"); trycount++; goto again; } else { US_ERR("do digest failed!"); return KAE_FAIL; } } return KAE_SUCCESS; } void wd_digests_set_input_data(digest_engine_ctx_t *e_digest_ctx) { // fill engine ctx opdata sec_digest_priv_t* md_ctx = e_digest_ctx->md_ctx; kae_memcpy((uint8_t *)e_digest_ctx->op_data.in, md_ctx->in, md_ctx->do_digest_len); e_digest_ctx->op_data.in_bytes = md_ctx->do_digest_len; e_digest_ctx->op_data.out_bytes = md_ctx->out_len; e_digest_ctx->op_data.has_next = (md_ctx->state == SEC_DIGEST_FINAL) ? false : true; } inline void wd_digests_get_output_data(digest_engine_ctx_t *e_digest_ctx) { sec_digest_priv_t* md_ctx = e_digest_ctx->md_ctx; // the real out data start at opdata.out + offset if (e_digest_ctx->op_data.has_next == false) { kae_memcpy(md_ctx->out, (uint8_t*)e_digest_ctx->op_data.out, md_ctx->out_len); } } inline uint32_t wd_digests_get_do_digest_len(digest_engine_ctx_t *e_digest_ctx, int leftlen) { uint32_t do_digest_len = 0; int max_input_datalen = DIGEST_BLOCK_SIZE; /* * Note: Small encrypted block can be encrypted once. * or the last encrypted slice of a large encrypted block */ if (leftlen <= max_input_datalen) { do_digest_len = leftlen; } else { do_digest_len = max_input_datalen; } return do_digest_len; } KAE_QUEUE_POOL_HEAD_S* wd_digests_get_qnode_pool(void) { return g_sec_digests_qnode_pool; } int wd_digests_init_qnode_pool(void) { kae_queue_pool_destroy(g_sec_digests_qnode_pool, wd_digests_free_engine_ctx); g_sec_digests_qnode_pool = kae_init_queue_pool(WCRYPTO_DIGEST); if (g_sec_digests_qnode_pool == NULL) { US_ERR("do digest ctx NULL!"); return KAE_FAIL; } return KAE_SUCCESS; }KAE/alg/digests/sec_digests.h0000644060212406010010000000461413616500010013242 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the digest interface for KAE engine * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #ifndef SEC_DIGESTS_H #define SEC_DIGESTS_H #include <openssl/engine.h> #include <openssl/evp.h> #include "wd_digest.h" #include "wd_queue_memory.h" #include "engine_types.h" #include "engine_utils.h" #define MAX_SEND_TRY_CNTS 50 #define MIN_DIGEST_LEN 512 #define INPUT_CACHE_SIZE (512 * 1024) #define SM3_LEN 32 #define MAX_OUTLEN 64 enum sec_digest_state { SEC_DIGEST_INIT = 0, SEC_DIGEST_FIRST_UPDATING, SEC_DIGEST_DOING, SEC_DIGEST_FINAL }; typedef struct digest_engine_ctx digest_engine_ctx_t; typedef struct sec_digest_priv sec_digest_priv_t; struct sec_digest_priv { uint8_t* last_update_buff; uint8_t* in; uint8_t* out; uint32_t d_mode; // haven't used uint32_t d_alg; uint32_t state; uint32_t last_update_bufflen; uint32_t do_digest_len; // do one cycle digest length uint32_t out_len; // digest out length uint32_t e_nid; // digest nid digest_engine_ctx_t* e_digest_ctx; EVP_MD_CTX* soft_ctx; uint32_t switch_flag; }; struct digest_engine_ctx { KAE_QUEUE_DATA_NODE_S* q_node; struct wcrypto_digest_op_data op_data; struct wcrypto_digest_ctx_setup setup; void* wd_ctx; // one ctx or a list of ctx sec_digest_priv_t* md_ctx; }; int sec_engine_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid); void sec_digests_free_methods(void); int sec_cipher_engine_ctx_poll(void* engnine_ctx); int digest_module_init(void); void sec_digests_cb(const void* msg, void* tag); #endif KAE/alg/digests/sec_digests_wd.h0000644060212406010010000000260613616500010013733 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the digest interface for KAE digests using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef SEC_DIGESTS_WD_H #define SEC_DIGESTS_WD_H #include "sec_digests.h" digest_engine_ctx_t* wd_digests_get_engine_ctx(sec_digest_priv_t* md_ctx); void wd_digests_put_engine_ctx(digest_engine_ctx_t* e_digest_ctx); int wd_digests_doimpl(digest_engine_ctx_t *e_digest_ctx); inline void wd_digests_set_input_data(digest_engine_ctx_t *e_digest_ctx); inline void wd_digests_get_output_data(digest_engine_ctx_t *e_digest_ctx); inline uint32_t wd_digests_get_do_digest_len(digest_engine_ctx_t *e_digest_ctx, int leftlen); KAE_QUEUE_POOL_HEAD_S* wd_digests_get_qnode_pool(void); int wd_digests_init_qnode_pool(void); void wd_digests_free_engine_ctx(void* digest_ctx); #endif KAE/alg/digests/sec_digests_soft.c0000644060212406010010000000720113616500010014263 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for switch to soft digests * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <asm/byteorder.h> #include "sec_digests_soft.h" #include "engine_opensslerr.h" #include "engine_log.h" static const EVP_MD *sec_digests_soft_md(uint32_t e_nid) { const EVP_MD *g_digest_md = NULL; switch (e_nid) { case NID_sm3: g_digest_md = EVP_sm3(); break; default: break; } return g_digest_md; } int sec_digests_soft_init(EVP_MD_CTX *ctx, uint32_t e_nid) { const EVP_MD *digest_md = NULL; digest_md = sec_digests_soft_md(e_nid); if (digest_md == NULL) { US_WARN("switch to soft:don't support by sec engine."); return OPENSSL_FAIL; } int ctx_len = EVP_MD_meth_get_app_datasize(digest_md); if (ctx->md_data == NULL) { ctx->md_data = OPENSSL_malloc(ctx_len); } if (!ctx->md_data) { KAEerr(KAE_F_DIGEST_SOFT_INIT, KAE_R_MALLOC_FAILURE); US_ERR("malloc md_data failed"); return OPENSSL_FAIL; } return EVP_MD_meth_get_init (digest_md)(ctx); } int sec_digests_soft_update(EVP_MD_CTX *ctx, const void *data, size_t data_len, uint32_t e_nid) { const EVP_MD *digest_md = NULL; digest_md = sec_digests_soft_md(e_nid); if (digest_md == NULL) { US_WARN("switch to soft:don't support by sec engine."); return OPENSSL_FAIL; } return EVP_MD_meth_get_update (digest_md)(ctx, data, data_len); } int sec_digests_soft_final(EVP_MD_CTX *ctx, unsigned char *digest, uint32_t e_nid) { US_WARN_LIMIT("call sec_digest_soft_final"); const EVP_MD *digest_md = NULL; digest_md = sec_digests_soft_md(e_nid); if (digest_md == NULL) { US_WARN("switch to soft:don't support by sec engine."); return OPENSSL_FAIL; } int ret = EVP_MD_meth_get_final(digest_md)(ctx, digest); if (ctx->md_data) { OPENSSL_free(ctx->md_data); } return ret; } void sec_digests_soft_work(sec_digest_priv_t *md_ctx, int len, unsigned char *digest) { if (md_ctx->soft_ctx == NULL) { md_ctx->soft_ctx = EVP_MD_CTX_new(); } if (md_ctx->last_update_buff == NULL) { md_ctx->last_update_buff = (unsigned char *)kae_malloc(len * sizeof(unsigned char)); } if (md_ctx->last_update_buff == NULL) { US_ERR("digests soft work:malloc last_update_buff filed!"); } (void)sec_digests_soft_init(md_ctx->soft_ctx, md_ctx->e_nid); (void)sec_digests_soft_update(md_ctx->soft_ctx, md_ctx->last_update_buff, len, md_ctx->e_nid); (void)sec_digests_soft_final(md_ctx->soft_ctx, digest, md_ctx->e_nid); if (md_ctx->soft_ctx != NULL) { EVP_MD_CTX_free(md_ctx->soft_ctx); md_ctx->soft_ctx = NULL; } if (md_ctx->last_update_buff != NULL) { kae_free(md_ctx->last_update_buff); } return; } void sec_digests_soft_cleanup(sec_digest_priv_t *md_ctx) { if (md_ctx->soft_ctx != NULL) { EVP_MD_CTX_free(md_ctx->soft_ctx); md_ctx->soft_ctx = NULL; } return; } KAE/alg/digests/sec_digests_soft.h0000644060212406010010000000311513616500010014270 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the digest interface for soft digests * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef SEC_DIGESTS_SOFT_H #define SEC_DIGESTS_SOFT_H #include "sec_digests.h" struct evp_md_ctx_st { const EVP_MD *digest; ENGINE *engine; /* functional reference if 'digest' is * ENGINE-provided */ unsigned long flags; void *md_data; /* Public key context for sign/verify */ EVP_PKEY_CTX *pctx; /* Update function: usually copied from EVP_MD */ int (*update)(EVP_MD_CTX *ctx, const void *data, size_t count); } /* EVP_MD_CTX */; int sec_digests_soft_init(EVP_MD_CTX *ctx, uint32_t e_nid); int sec_digests_soft_update(EVP_MD_CTX *ctx, const void *data, size_t data_len, uint32_t e_nid); int sec_digests_soft_final(EVP_MD_CTX *ctx, unsigned char *digest, uint32_t e_nid); void sec_digests_soft_work(sec_digest_priv_t *md_ctx, int len, unsigned char *digest); void sec_digests_soft_cleanup(sec_digest_priv_t *md_ctx); #endif KAE/alg/dh/0000755060212406010010000000000013616500010007521 5ustar KAE/alg/dh/hpre_dh_soft.c0000644060212406010010000000667113616500010012343 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for switch to soft dh. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hpre_dh_soft.h" #include "engine_types.h" #include "engine_log.h" static int generate_new_priv_key(const DH* dh, BIGNUM* new_priv_key); void hpre_dh_soft_get_pg(const DH* dh, const BIGNUM** p, const BIGNUM** g, const BIGNUM** q) { DH_get0_pqg(dh, p, q, g); } int hpre_dh_soft_try_get_priv_key(const DH* dh, BIGNUM** priv_key) { int generate_new_key = 0; BIGNUM* new_priv_key = NULL; // get the private key from dh. 
*priv_key = (BIGNUM*)DH_get0_priv_key(dh); if (*priv_key == NULL) { new_priv_key = BN_secure_new(); if (new_priv_key == NULL) { goto err; } generate_new_key = 1; } if (generate_new_key) { // generate random private key,referencing function 'generate_key' in openssl if (generate_new_priv_key(dh, new_priv_key) == OPENSSL_FAIL) { goto err; } else { *priv_key = new_priv_key; } } return OPENSSL_SUCCESS; err: BN_free(new_priv_key); return OPENSSL_FAIL; } void hpre_dh_soft_set_pkeys(DH* dh, BIGNUM* pub_key, BIGNUM* priv_key) { const BIGNUM* old_pub = DH_get0_pub_key(dh); const BIGNUM* old_priv = DH_get0_priv_key(dh); if (old_pub != pub_key && old_priv != priv_key) { DH_set0_key(dh, pub_key, priv_key); } else if (old_pub != pub_key) { DH_set0_key(dh, pub_key, NULL); } else if (old_priv != priv_key) { DH_set0_key(dh, NULL, priv_key); } } int hpre_dh_soft_generate_key(DH* dh) { int (*dh_soft_generate_key)(DH *dh); dh_soft_generate_key = DH_meth_get_generate_key(DH_OpenSSL()); int ret = dh_soft_generate_key(dh); if (ret < 0) { US_ERR("dh soft key generate fail: %d", ret); return OPENSSL_FAIL; } return OPENSSL_SUCCESS; } int hpre_dh_soft_compute_key(unsigned char* key, const BIGNUM* pub_key, DH* dh) { int (*dh_soft_compute_key)(unsigned char *key, const BIGNUM *pub_key, DH *dh); dh_soft_compute_key = DH_meth_get_compute_key(DH_OpenSSL()); int ret = dh_soft_compute_key(key, pub_key, dh); if (ret < 0) { US_ERR("dh soft key compute fail: %d", ret); return OPENSSL_FAIL; } return ret; } static int generate_new_priv_key(const DH* dh, BIGNUM* new_priv_key) { const BIGNUM* q = DH_get0_q(dh); int l; if (q) { do { if (!BN_priv_rand_range(new_priv_key, q)) { return OPENSSL_FAIL; } } while (BN_is_zero(new_priv_key) || BN_is_one(new_priv_key)); } else { l = DH_get_length(dh) ? DH_get_length(dh) : BN_num_bits(DH_get0_p(dh)) - 1; if (!BN_priv_rand(new_priv_key, l, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ANY)) { return OPENSSL_FAIL; } } return OPENSSL_SUCCESS; }KAE/alg/dh/hpre_dh_wd.c0000644060212406010010000003122013616500010011766 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides wd api for DH. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "hpre_dh_wd.h" #include "hpre_dh_util.h" #include "engine_types.h" #include "engine_log.h" #include "async_callback.h" #include "async_task_queue.h" #include "async_event.h" #include "utils/engine_check.h" #include <openssl/bn.h> #define DH_GENERATOR_2 2 #define DH_GENERATOR_5 5 #define CHAR_BIT_SIZE 3 #define DH_PARAMS_CNT 4 #define MAX_SEND_TRY_CNTS 50 #define WD_STATUS_BUSY (-EBUSY) KAE_QUEUE_POOL_HEAD_S* g_hpre_dh_qnode_pool = NULL; static hpre_dh_engine_ctx_t* hpre_dh_new_eng_ctx(DH* alg); static int hpre_dh_init_eng_ctx(hpre_dh_engine_ctx_t* eng_ctx, int bits, bool is_g2); static int hpre_dh_set_g(const BIGNUM* g, const int key_size, unsigned char* ag_bin, hpre_dh_engine_ctx_t* engine_ctx); static int hpre_dh_fill_g_p_priv_key( const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, hpre_dh_engine_ctx_t* engine_ctx, unsigned char* ag_bin); static int hpre_dh_internal_do(void* ctx, struct wcrypto_dh_op_data* opdata); static int hpre_dh_fill_pub_key(const BIGNUM* pub_key, hpre_dh_engine_ctx_t* engine_ctx, unsigned char* ag_bin); static void hpre_dh_free_opdata(hpre_dh_engine_ctx_t* eng_ctx); static int hpre_internal_do_dh(hpre_dh_engine_ctx_t *eng_ctx, enum wcrypto_dh_op_type op_type); static int hpre_dh_async(hpre_dh_engine_ctx_t *eng_ctx, struct wcrypto_dh_op_data *opdata, op_done_t *op_done); int wd_hpre_dh_init_qnode_pool() { kae_queue_pool_destroy(g_hpre_dh_qnode_pool, NULL); g_hpre_dh_qnode_pool = kae_init_queue_pool(WCRYPTO_DH); if (g_hpre_dh_qnode_pool == NULL) { WD_ERR("hpre dh qnode poll init fail!\n"); return KAE_FAIL; } return KAE_SUCCESS; } KAE_QUEUE_POOL_HEAD_S* wd_hpre_dh_get_qnode_pool() { return g_hpre_dh_qnode_pool; } hpre_dh_engine_ctx_t* hpre_dh_get_eng_ctx(DH* dh, int bits, bool is_g2) { hpre_dh_engine_ctx_t* eng_ctx = hpre_dh_new_eng_ctx(dh); if (eng_ctx == NULL) { US_WARN("new eng ctx fail then switch to soft!"); return NULL; } if (hpre_dh_init_eng_ctx(eng_ctx, bits, is_g2) == 0) { hpre_dh_free_eng_ctx(eng_ctx); US_WARN("init eng ctx fail then switch to soft!"); return NULL; } return eng_ctx; } int hpre_dh_fill_genkey_opdata( const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, hpre_dh_engine_ctx_t* engine_ctx) { unsigned char* ag_bin = NULL; int key_size = engine_ctx->priv_ctx.key_size; // allocate data block ag_bin = (unsigned char *)kae_wd_alloc_blk(engine_ctx->qlist->kae_queue_mem_pool, key_size); if (!ag_bin) { US_ERR("pool alloc ag_bin fail!"); return -ENOMEM; } int ret = hpre_dh_fill_g_p_priv_key(g, p, priv_key, engine_ctx, ag_bin); if (ret != HPRE_DH_SUCCESS) { kae_wd_free_blk(engine_ctx->qlist->kae_queue_mem_pool, ag_bin); return ret; } engine_ctx->priv_ctx.block_addr = ag_bin; return HPRE_DH_SUCCESS; } int hpre_dh_fill_compkey_opdata( const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, const BIGNUM* pub_key, hpre_dh_engine_ctx_t* engine_ctx) { unsigned char* ag_bin = NULL; int key_size = engine_ctx->priv_ctx.key_size; ag_bin = (unsigned char*)kae_wd_alloc_blk(engine_ctx->qlist->kae_queue_mem_pool, key_size); if (!ag_bin) { US_ERR("pool alloc ag_bin fail!"); return -ENOMEM; } int ret = hpre_dh_fill_g_p_priv_key(g, p, priv_key, engine_ctx, ag_bin); if (ret != HPRE_DH_SUCCESS) { kae_wd_free_blk(engine_ctx->qlist->kae_queue_mem_pool, ag_bin); return ret; } ret = hpre_dh_fill_pub_key(pub_key, engine_ctx, ag_bin); if (ret != HPRE_DH_SUCCESS) { return ret; } engine_ctx->priv_ctx.block_addr = ag_bin; return HPRE_DH_SUCCESS; } int hpre_dh_genkey(hpre_dh_engine_ctx_t* engine_ctx) { return hpre_internal_do_dh(engine_ctx, 
WCRYPTO_DH_PHASE1); } int hpre_dh_compkey(hpre_dh_engine_ctx_t* engine_ctx) { return hpre_internal_do_dh(engine_ctx, WCRYPTO_DH_PHASE2); } int hpre_dh_get_output_chars(hpre_dh_engine_ctx_t* engine_ctx, unsigned char* out) { kae_memcpy(out, engine_ctx->opdata.pri, engine_ctx->opdata.pri_bytes); return engine_ctx->opdata.pri_bytes; } int hpre_dh_get_pubkey(hpre_dh_engine_ctx_t* engine_ctx, BIGNUM** pubkey) { const unsigned char* pubkey_str = (const unsigned char*)engine_ctx->opdata.pri; if (pubkey_str == NULL) { return HPRE_DH_FAIL; } *pubkey = BN_bin2bn(pubkey_str, engine_ctx->opdata.pri_bytes, *pubkey); if (*pubkey == NULL) { return HPRE_DH_FAIL; } return HPRE_DH_SUCCESS; } void hpre_dh_free_eng_ctx(hpre_dh_engine_ctx_t* eng_ctx) { US_DEBUG("hpre dh free engine ctx start!"); if (eng_ctx == NULL) { US_DEBUG("no eng_ctx to free"); return; } if (eng_ctx->qlist != NULL) { if (eng_ctx->ctx != NULL) { wcrypto_del_dh_ctx(eng_ctx->ctx); } kae_put_node_to_pool(g_hpre_dh_qnode_pool, eng_ctx->qlist); } hpre_dh_free_opdata(eng_ctx); eng_ctx->priv_ctx.block_addr = NULL; eng_ctx->priv_ctx.ssl_alg = NULL; eng_ctx->qlist = NULL; eng_ctx->ctx = NULL; eng_ctx->opdata.pri = NULL; eng_ctx->opdata.x_p = NULL; eng_ctx->opdata.pv = NULL; OPENSSL_free(eng_ctx); eng_ctx = NULL; return; } static int hpre_internal_do_dh(hpre_dh_engine_ctx_t *eng_ctx, enum wcrypto_dh_op_type op_type) { int job_ret; op_done_t op_done; async_init_op_done(&op_done); eng_ctx->opdata.op_type = op_type; if (op_done.job != NULL && kae_is_async_enabled()) { if (async_setup_async_event_notification(0) == 0) { US_ERR("hpre async event notifying failed"); async_cleanup_op_done(&op_done); return HPRE_DH_FAIL; } } else { US_DEBUG("hpre dh no async Job or async disable, back to sync!"); async_cleanup_op_done(&op_done); return hpre_dh_internal_do(eng_ctx->ctx, &eng_ctx->opdata); } if (hpre_dh_async(eng_ctx, &eng_ctx->opdata, &op_done) == HPRE_DH_FAIL) goto err; do { job_ret = async_pause_job(op_done.job, ASYNC_STATUS_OK); if (job_ret == 0) { US_DEBUG("- pthread_yidle -"); kae_pthread_yield(); } } while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); if (op_done.verifyRst <= 0) { US_ERR("hpre dh verify result failed with %d", op_done.verifyRst); async_cleanup_op_done(&op_done); return HPRE_DH_FAIL; } async_cleanup_op_done(&op_done); US_DEBUG("hpre dh do async job success!"); return HPRE_DH_SUCCESS; err: US_ERR("hpre dh do async job err"); (void)async_clear_async_event_notification(); async_cleanup_op_done(&op_done); return HPRE_DH_FAIL; } static void hpre_dh_free_opdata(hpre_dh_engine_ctx_t* eng_ctx) { if (eng_ctx->priv_ctx.block_addr != NULL) { if (eng_ctx->qlist != NULL) { eng_ctx->dh_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->priv_ctx.block_addr); } } } static hpre_dh_engine_ctx_t* hpre_dh_new_eng_ctx(DH* alg) { hpre_dh_engine_ctx_t* eng_ctx = NULL; eng_ctx = (hpre_dh_engine_ctx_t*)OPENSSL_malloc(sizeof(hpre_dh_engine_ctx_t)); if (eng_ctx == NULL) { US_ERR("hpre engine_ctx malloc fail"); return NULL; } kae_memset(eng_ctx, 0, sizeof(hpre_dh_engine_ctx_t)); eng_ctx->priv_ctx.ssl_alg = alg; eng_ctx->qlist = kae_get_node_from_pool(g_hpre_dh_qnode_pool); if (eng_ctx->qlist == NULL) { US_ERR_LIMIT("error. 
get hardware queue failed"); OPENSSL_free(eng_ctx); eng_ctx = NULL; return NULL; } return eng_ctx; } static void hpre_dh_cb(const void *message, void *tag) { if (!message || !tag) { US_ERR("hpre cb params err!\n"); return; } struct wcrypto_dh_msg *msg = (struct wcrypto_dh_msg *)message; hpre_dh_engine_ctx_t *eng_ctx = (hpre_dh_engine_ctx_t *)tag; eng_ctx->opdata.pri = msg->out; eng_ctx->opdata.pri_bytes = msg->out_bytes; eng_ctx->opdata.status = msg->result; } static int hpre_dh_init_eng_ctx(hpre_dh_engine_ctx_t* eng_ctx, int bits, bool is_g2) { struct wd_queue* q = eng_ctx->qlist->kae_wd_queue; struct wd_queue_mempool* pool = eng_ctx->qlist->kae_queue_mem_pool; // this is for ctx is in use.we dont need to re create ctx->ctx again if (eng_ctx->ctx) { return OPENSSL_SUCCESS; } if (eng_ctx->ctx == NULL) { if (bits == 0) { eng_ctx->priv_ctx.key_size = DH_size(eng_ctx->priv_ctx.ssl_alg); } else { eng_ctx->priv_ctx.key_size = bits >> CHAR_BIT_SIZE; } eng_ctx->priv_ctx.block_addr = NULL; eng_ctx->dh_setup.key_bits = eng_ctx->priv_ctx.key_size << CHAR_BIT_SIZE; eng_ctx->dh_setup.cb = hpre_dh_cb; eng_ctx->dh_setup.br.alloc = kae_wd_alloc_blk; eng_ctx->dh_setup.br.free = kae_wd_free_blk; eng_ctx->dh_setup.br.usr = pool; eng_ctx->dh_setup.is_g2 = is_g2; eng_ctx->ctx = wcrypto_create_dh_ctx(q, &eng_ctx->dh_setup); if (eng_ctx->ctx == NULL) { US_ERR("create dh ctx fail!"); return OPENSSL_FAIL; } } return OPENSSL_SUCCESS; } static int hpre_dh_set_g(const BIGNUM* g, const int key_size, unsigned char* ag_bin, hpre_dh_engine_ctx_t* engine_ctx) { struct wd_dtb g_dtb; __u32 gbytes = BN_bn2bin(g, ag_bin); g_dtb.data = (char*)ag_bin; g_dtb.bsize = key_size; g_dtb.dsize = gbytes; int ret = wcrypto_set_dh_g(engine_ctx->ctx, &g_dtb); if (ret) { US_ERR("wcrypto_set_dh_g fail: %d", ret); return HPRE_DH_FAIL; } return HPRE_DH_SUCCESS; } static int hpre_dh_fill_g_p_priv_key( const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, hpre_dh_engine_ctx_t* engine_ctx, unsigned char* ag_bin) { unsigned char* apriv_key_bin = NULL; unsigned char* ap_bin = NULL; int key_size = engine_ctx->priv_ctx.key_size; int ret = 0; apriv_key_bin = ag_bin + key_size; ap_bin = apriv_key_bin + key_size; memset(ag_bin, 0, key_size * DH_PARAMS_CNT); // construct data block of g ret = hpre_dh_set_g(g, key_size, ag_bin, engine_ctx); if (ret != HPRE_DH_SUCCESS) { return HPRE_DH_FAIL; } // construct data block of p and private key engine_ctx->opdata.pbytes = BN_bn2bin(p, ap_bin); engine_ctx->opdata.xbytes = BN_bn2bin(priv_key, apriv_key_bin); engine_ctx->opdata.x_p = apriv_key_bin; engine_ctx->opdata.pri = ap_bin + key_size; return HPRE_DH_SUCCESS; } static int hpre_dh_internal_do(void* ctx, struct wcrypto_dh_op_data* opdata) { int ret = wcrypto_do_dh(ctx, opdata, NULL); if (ret) { US_ERR("wcrypto_do_dh fail: %d", ret); return HPRE_DH_FAIL; } else if (opdata->pri == NULL) { US_ERR("output is empty"); return HPRE_DH_FAIL; } else { return HPRE_DH_SUCCESS; } } static int hpre_dh_fill_pub_key(const BIGNUM* pub_key, hpre_dh_engine_ctx_t* engine_ctx, unsigned char* ag_bin) { engine_ctx->opdata.pvbytes = BN_bn2bin(pub_key, ag_bin); engine_ctx->opdata.pv = ag_bin; /* bob's public key here */ return HPRE_DH_SUCCESS; } static int hpre_dh_async(hpre_dh_engine_ctx_t *eng_ctx, struct wcrypto_dh_op_data *opdata, op_done_t *op_done) { int ret = 0; int cnt = 0; enum task_type type = ASYNC_TASK_DH; void *tag = eng_ctx; do { if (cnt > MAX_SEND_TRY_CNTS) { break; } ret = wcrypto_do_dh(eng_ctx->ctx, opdata, tag); if (ret == WD_STATUS_BUSY) { if 
((async_wake_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || (async_pause_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0))) { US_ERR("hpre wake job or hpre pause job fail!"); ret = 0; break; } cnt++; } } while (ret == WD_STATUS_BUSY); if (ret != WD_SUCCESS) { return HPRE_DH_FAIL; } if (async_add_poll_task(eng_ctx, op_done, type) == 0) { return HPRE_DH_FAIL; } return HPRE_DH_SUCCESS; } KAE/alg/dh/hpre_dh.h0000644060212406010010000000171113616500010011303 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine DH. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_DH_H #define HPRE_DH_H #include <openssl/dh.h> const DH_METHOD *hpre_get_dh_methods(void); int hpre_module_dh_init(); void hpre_dh_destroy(); EVP_PKEY_METHOD *get_dh_pkey_meth(void); EVP_PKEY_METHOD *get_dsa_pkey_meth(void); #endifKAE/alg/dh/hpre_dh.c0000644060212406010010000003006313616500010011300 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine DH. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "hpre_dh.h" #include "hpre_dh_wd.h" #include "hpre_dh_soft.h" #include "hpre_dh_util.h" #include "engine_kae.h" #include "engine_types.h" #include "engine_opensslerr.h" #include "async_task_queue.h" #define DH768BITS 768 #define DH1024BITS 1024 #define DH1536BITS 1536 #define DH2048BITS 2048 #define DH3072BITS 3072 #define DH4096BITS 4096 #define GENERATOR_2 2 #ifndef OPENSSL_NO_DH const int DHPKEYMETH_IDX = 1; #else const int DHPKEYMETH_IDX = -1; #endif const char* g_hpre_dh_device = "hisi_hpre"; static DH_METHOD* g_hpre_dh_method = NULL; static EVP_PKEY_METHOD* g_hpre_dh_pkey_meth = NULL; static int hpre_dh_generate_key(DH* dh); static int hpre_dh_compute_key(unsigned char* key, const BIGNUM* pub_key, DH* dh); static int hpre_db_bn_mod_exp( const DH* dh, BIGNUM* r, const BIGNUM* a, const BIGNUM* p, const BIGNUM* m, BN_CTX* ctx, BN_MONT_CTX* m_ctx); static int check_dh_bit_useful(const int bit); static int prepare_dh_data(const int bits, const BIGNUM* g, DH* dh, hpre_dh_engine_ctx_t** eng_ctx, BIGNUM** priv_key); static int hpre_dh_ctx_poll(void* engine_ctx); static int hpre_dh_keygen(EVP_PKEY_CTX* ctx, EVP_PKEY* pkey); static int hpre_dh_derive(EVP_PKEY_CTX* ctx, unsigned char* key, size_t* keylen); const DH_METHOD* hpre_get_dh_methods(void) { int ret = 1; if (g_hpre_dh_method != NULL) { return g_hpre_dh_method; } if (!kae_get_device(g_hpre_dh_device)) { const DH_METHOD* default_soft_method = DH_OpenSSL(); return default_soft_method; } g_hpre_dh_method = DH_meth_new("HPRE DH method", 0); if (g_hpre_dh_method == NULL) { KAEerr(KAE_F_HPRE_GET_DH_METHODS, KAE_R_MALLOC_FAILURE); US_ERR("Failed to allocate HPRE DH methods"); return NULL; } ret &= DH_meth_set_generate_key(g_hpre_dh_method, hpre_dh_generate_key); ret &= DH_meth_set_compute_key(g_hpre_dh_method, hpre_dh_compute_key); ret &= DH_meth_set_bn_mod_exp(g_hpre_dh_method, hpre_db_bn_mod_exp); if (ret == 0) { KAEerr(KAE_F_HPRE_GET_DH_METHODS, KAE_R_DH_SET_METHODS_FAILURE); US_ERR("Failed to set HPRE DH methods"); return NULL; } return g_hpre_dh_method; } int hpre_module_dh_init() { wd_hpre_dh_init_qnode_pool(); /* register async poll func */ async_register_poll_fn(ASYNC_TASK_DH, hpre_dh_ctx_poll); return HPRE_DH_SUCCESS; } void hpre_dh_destroy() { if (g_hpre_dh_method != NULL) { DH_meth_free(g_hpre_dh_method); g_hpre_dh_method = NULL; } } EVP_PKEY_METHOD* get_dh_pkey_meth(void) { const EVP_PKEY_METHOD* def_dh = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); if (g_hpre_dh_pkey_meth == NULL) { g_hpre_dh_pkey_meth = EVP_PKEY_meth_new(EVP_PKEY_DH, 0); if (g_hpre_dh_pkey_meth == NULL) { US_ERR("failed to new pkey meth"); return NULL; } } EVP_PKEY_meth_copy(g_hpre_dh_pkey_meth, def_dh); EVP_PKEY_meth_set_keygen(g_hpre_dh_pkey_meth, 0, hpre_dh_keygen); EVP_PKEY_meth_set_derive(g_hpre_dh_pkey_meth, 0, hpre_dh_derive); return g_hpre_dh_pkey_meth; } EVP_PKEY_METHOD *get_dsa_pkey_meth(void) { return (EVP_PKEY_METHOD*)EVP_PKEY_meth_get0(DHPKEYMETH_IDX); } static DH* change_dh_method(DH* dh_default) { const DH_METHOD* hw_dh = hpre_get_dh_methods(); DH* dh = DH_new(); const BIGNUM *p, *q, *g, *priv_key, *pub_key; BIGNUM *p1, *q1, *g1, *priv_key1, *pub_key1; DH_get0_pqg(dh_default, &p, &q, &g); DH_get0_key(dh_default, &pub_key, &priv_key); p1 = BN_dup(p); q1 = BN_dup(q); g1 = BN_dup(g); priv_key1 = BN_dup(priv_key); pub_key1 = BN_dup(pub_key); if (dh != NULL) { DH_set_method(dh, hw_dh); DH_set0_pqg(dh, p1, q1, g1); DH_set0_key(dh, pub_key1, priv_key1); return dh; } else { KAEerr(KAE_F_CHANGDHMETHOD, KAE_R_MALLOC_FAILURE); US_ERR("changDHMethod 
failed."); return (DH*)NULL; } } static int hpre_dh_keygen(EVP_PKEY_CTX* ctx, EVP_PKEY* pkey) { DH* dh = NULL; int ret = 0; int (*pkeygen)(EVP_PKEY_CTX* ctx, EVP_PKEY* pkey); EVP_PKEY* pk = EVP_PKEY_CTX_get0_pkey(ctx); DH* dh_default = EVP_PKEY_get1_DH(pk); bool is_dsa = DH_get0_q(dh_default) != NULL; if (is_dsa) { const EVP_PKEY_METHOD* def_dh_meth = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); EVP_PKEY_meth_get_keygen(def_dh_meth, (int (**)(EVP_PKEY_CTX*))NULL, &pkeygen); ret = pkeygen(ctx, pkey); } else { dh = change_dh_method(dh_default); EVP_PKEY_set1_DH(pk, dh); const EVP_PKEY_METHOD* def_dh_meth = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); EVP_PKEY_meth_get_keygen(def_dh_meth, (int (**)(EVP_PKEY_CTX*))NULL, &pkeygen); ret = pkeygen(ctx, pkey); EVP_PKEY_assign_DH(pk, dh_default); DH_free(dh); } return ret; } static int hpre_dh_derive(EVP_PKEY_CTX* ctx, unsigned char* key, size_t* keylen) { DH* dh = NULL; int ret = 0; int (*pderive)(EVP_PKEY_CTX* ctx, unsigned char* key, size_t* keylen); EVP_PKEY* pk = EVP_PKEY_CTX_get0_pkey(ctx); DH* dh_default = EVP_PKEY_get1_DH(pk); bool is_dsa = DH_get0_q(dh_default) != NULL; if (is_dsa) { const EVP_PKEY_METHOD* def_dh_meth = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); EVP_PKEY_meth_get_derive(def_dh_meth, (int (**)(EVP_PKEY_CTX*))NULL, &pderive); ret = pderive(ctx, key, keylen); } else { dh = change_dh_method(dh_default); EVP_PKEY_set1_DH(pk, dh); const EVP_PKEY_METHOD* def_dh_meth = EVP_PKEY_meth_get0(DHPKEYMETH_IDX); EVP_PKEY_meth_get_derive(def_dh_meth, (int (**)(EVP_PKEY_CTX*))NULL, &pderive); ret = pderive(ctx, key, keylen); EVP_PKEY_assign_DH(pk, dh_default); DH_free(dh); } return ret; } static int hpre_dh_ctx_poll(void* engine_ctx) { int ret; hpre_dh_engine_ctx_t* eng_ctx = (hpre_dh_engine_ctx_t*)engine_ctx; struct wd_queue* q = eng_ctx->qlist->kae_wd_queue; poll_again: ret = wcrypto_dh_poll(q, 1); if (!ret) { goto poll_again; } else if (ret < 0) { US_ERR("dh poll fail!\n"); return ret; } return ret; } static int hpre_dh_generate_key(DH* dh) { int bits = DH_bits(dh); const BIGNUM* p = NULL; const BIGNUM* g = NULL; const BIGNUM* q = NULL; BIGNUM* pub_key = NULL; BIGNUM* priv_key = NULL; hpre_dh_engine_ctx_t* eng_ctx = NULL; int ret = HPRE_DH_FAIL; if (dh == NULL) { KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_INVALID_PARAMETER); US_ERR("DH_BUILTIN_KEYGEN KAE_R_DH_INVALID_PARAMETER"); return HPRE_DH_FAIL; } hpre_dh_soft_get_pg(dh, &p, &g, &q); if (p == NULL || g == NULL) { KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_INVALID_PARAMETER); US_ERR("invalid g or p."); return HPRE_DH_FAIL; } // check whether it is dsa parameter. CHECK_AND_GOTO(q != NULL, end_soft, "q is not null, then switch to soft!"); // check whether bits exceeds the limit. if (bits > OPENSSL_DH_MAX_MODULUS_BITS) { KAEerr(KAE_F_HPRE_DH_KEYGEN, KAE_R_DH_KEY_SIZE_TOO_LARGE); US_ERR("DH_BUILTIN_KEYGEN DH_KEY_SIZE_TOO_LARGE"); return HPRE_DH_FAIL; } ret = prepare_dh_data(bits, g, dh, &eng_ctx, &priv_key); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "prepare dh data failed!"); // construct opdata ret = hpre_dh_fill_genkey_opdata(g, p, priv_key, eng_ctx); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "fill opdata fail then switch to soft!"); // call wd api ret = hpre_dh_genkey(eng_ctx); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "hpre generate dh key failed.switch to soft!"); // get public key from opdata ret = hpre_dh_get_pubkey(eng_ctx, &pub_key); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "get pub key failed.switch to soft!"); // set public key and secret key to the DH. 
hpre_dh_soft_set_pkeys(dh, pub_key, priv_key); end_soft: if (pub_key != DH_get0_pub_key(dh)) { BN_free(pub_key); } if (priv_key != DH_get0_priv_key(dh)) { BN_free(priv_key); } hpre_dh_free_eng_ctx(eng_ctx); if (ret != HPRE_DH_SUCCESS) { return hpre_dh_soft_generate_key(dh); } else { US_DEBUG("hpre dh generate key success!"); return HPRE_DH_SUCCESS; } } static int hpre_dh_compute_key(unsigned char* key, const BIGNUM* pub_key, DH* dh) { int bits = DH_bits(dh); const BIGNUM* p = NULL; const BIGNUM* g = NULL; const BIGNUM* q = NULL; BIGNUM* priv_key = NULL; hpre_dh_engine_ctx_t* eng_ctx = NULL; int ret = HPRE_DH_FAIL; int ret_size = 0; if (dh == NULL || key == NULL || pub_key == NULL || DH_get0_priv_key(dh) == NULL) { KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_INVALID_PARAMETER); US_ERR("KAE_F_HPRE_DH_KEYCOMP KAE_R_DH_INVALID_PARAMETER"); return HPRE_DH_FAIL; } hpre_dh_soft_get_pg(dh, &p, &g, &q); if (p == NULL || g == NULL) { KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_INVALID_PARAMETER); US_ERR("invalid g or p."); return HPRE_DH_FAIL; } // check whether it is dsa parameter. CHECK_AND_GOTO(q != NULL, end_soft, "q is not null, then switch to soft!"); // check whether bits exceeds the limit. if (bits > OPENSSL_DH_MAX_MODULUS_BITS) { KAEerr(KAE_F_HPRE_DH_KEYCOMP, KAE_R_DH_KEY_SIZE_TOO_LARGE); US_ERR("DH_BUILTIN_KEYGEN DH_KEY_SIZE_TOO_LARGE"); return HPRE_DH_FAIL; } ret = prepare_dh_data(bits, g, dh, &eng_ctx, &priv_key); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "prepare dh data failed!"); // construct opdata ret = hpre_dh_fill_compkey_opdata(g, p, priv_key, pub_key, eng_ctx); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "fill opdata fail then switch to soft!"); // call wd api to generate shared secret key. ret = hpre_dh_compkey(eng_ctx); CHECK_AND_GOTO(ret != HPRE_DH_SUCCESS, end_soft, "hpre compute dh key failed.switch to soft!"); ret_size = hpre_dh_get_output_chars(eng_ctx, key); end_soft: hpre_dh_free_eng_ctx(eng_ctx); if (ret != HPRE_DH_SUCCESS) { return hpre_dh_soft_compute_key(key, pub_key, dh); } else { US_DEBUG("hpre dh compute key success!"); return ret_size; } } static int hpre_db_bn_mod_exp( const DH* dh, BIGNUM* r, const BIGNUM* a, const BIGNUM* p, const BIGNUM* m, BN_CTX* ctx, BN_MONT_CTX* m_ctx) { return BN_mod_exp_mont(r, a, p, m, ctx, m_ctx); } static int check_dh_bit_useful(const int bit) { switch (bit) { case DH768BITS: case DH1024BITS: case DH1536BITS: case DH2048BITS: case DH3072BITS: case DH4096BITS: return 1; default: break; } return 0; } static int prepare_dh_data(const int bits, const BIGNUM* g, DH* dh, hpre_dh_engine_ctx_t** eng_ctx, BIGNUM** priv_key) { int ret = HPRE_DH_FAIL; bool is_g2 = BN_is_word(g, GENERATOR_2); // check whether the bits is supported by hpre. CHECK_AND_GOTO(!check_dh_bit_useful(bits), err, "op sizes not supported by hpre engine then back to soft!"); // get ctx *eng_ctx = hpre_dh_get_eng_ctx(dh, bits, is_g2); CHECK_AND_GOTO(*eng_ctx == NULL, err, "get eng ctx fail then switch to soft!"); // get private key ret = hpre_dh_soft_try_get_priv_key(dh, priv_key); CHECK_AND_GOTO(ret != OPENSSL_SUCCESS, err, "get priv key fail then switch to soft!"); return HPRE_DH_SUCCESS; err: return HPRE_DH_FAIL; }KAE/alg/dh/hpre_dh_util.h0000644060212406010010000000211613616500010012340 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides common function for DH. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_DH_UTILS_H #define HPRE_DH_UTILS_H #define HPRE_DH_SUCCESS 1 #define HPRE_DH_FAIL 0 #define CHECK_AND_GOTO(cond, goto_tag, log) \ do { \ if (cond) { \ US_WARN(log); \ goto goto_tag; \ } \ } while (0) #endifKAE/alg/dh/hpre_dh_soft.h0000644060212406010010000000257513616500010012347 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for switch to soft dh. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_DH_SOFT_H #define HPRE_DH_SOFT_H #include <openssl/dh.h> /* * get p, g, q in dh. */ void hpre_dh_soft_get_pg(const DH* dh, const BIGNUM** p, const BIGNUM** g, const BIGNUM** q); /* * get private key in dh, if null, then generate a random one. */ int hpre_dh_soft_try_get_priv_key(const DH* dh, BIGNUM** priv_key); /* * put private key and public key in the dh. */ void hpre_dh_soft_set_pkeys(DH* dh, BIGNUM* pub_key, BIGNUM* priv_key); /* * call openssl API to generate public key . */ int hpre_dh_soft_generate_key(DH* dh); /* * call openssl API to generate secret key . */ int hpre_dh_soft_compute_key(unsigned char* key, const BIGNUM* pub_key, DH* dh); #endifKAE/alg/dh/hpre_dh_wd.h0000644060212406010010000000440713616500010012002 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides wd api for DH. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef HPRE_DH_WD_H #define HPRE_DH_WD_H #include <openssl/dh.h> #include "wdmngr/wd_queue_memory.h" #include "wd_dh.h" struct hpre_dh_priv_ctx { DH* ssl_alg; int key_size; unsigned char* block_addr; }; typedef struct hpre_dh_priv_ctx hpre_dh_priv_ctx_t; struct hpre_dh_engine_ctx { void* ctx; struct wcrypto_dh_op_data opdata; struct wcrypto_dh_ctx_setup dh_setup; struct KAE_QUEUE_DATA_NODE* qlist; hpre_dh_priv_ctx_t priv_ctx; }; typedef struct hpre_dh_engine_ctx hpre_dh_engine_ctx_t; int wd_hpre_dh_init_qnode_pool(); KAE_QUEUE_POOL_HEAD_S* wd_hpre_dh_get_qnode_pool(); void hpre_dh_free_eng_ctx(hpre_dh_engine_ctx_t* eng_ctx); hpre_dh_engine_ctx_t* hpre_dh_get_eng_ctx(DH* dh, int bits, bool is_g2); /* * fill opdata for generate_key. */ int hpre_dh_fill_genkey_opdata(const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, hpre_dh_engine_ctx_t* engine_ctx); /* * fill opdata for compute_key. */ int hpre_dh_fill_compkey_opdata(const BIGNUM* g, const BIGNUM* p, const BIGNUM* priv_key, const BIGNUM* pub_key, hpre_dh_engine_ctx_t* engine_ctx); /* * call wd API for generating public key. */ int hpre_dh_genkey(hpre_dh_engine_ctx_t* engine_ctx); /* * call wd API for generating secret key. */ int hpre_dh_compkey(hpre_dh_engine_ctx_t* engine_ctx); /* * get public key from engine ctx. */ int hpre_dh_get_pubkey(hpre_dh_engine_ctx_t* engine_ctx, BIGNUM** pubkey); /* * get secret key from engine ctx. */ int hpre_dh_get_output_chars(hpre_dh_engine_ctx_t* engine_ctx, unsigned char* out); #endif KAE/alg/pkey/0000755060212406010010000000000013616500010010076 5ustar KAE/alg/pkey/hpre_rsa.c0000644060212406010010000007073513616500010012061 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine rsa * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include <openssl/rsa.h> #include <openssl/err.h> #include <openssl/evp.h> #include "hpre_rsa.h" #include "hpre_wd.h" #include "hpre_rsa_soft.h" #include "async_poll.h" #include "engine_types.h" #include "engine_kae.h" #include "hpre_rsa_utils.h" #ifndef OPENSSL_NO_RSA const int RSAPKEYMETH_IDX = 0; #else const int RSAPKEYMETH_IDX = -1; #endif const char *g_hpre_device = "hisi_hpre"; static RSA_METHOD *g_hpre_rsa_method = NULL; static RSA_METHOD *g_soft_rsa_method = NULL; static EVP_PKEY_METHOD *g_hpre_pkey_meth = NULL; static int hpre_rsa_public_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); static int hpre_rsa_private_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); static int hpre_rsa_public_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); static int hpre_rsa_private_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding); static int hpre_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); static int hpre_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx); static int hpre_bn_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); static int hpre_evp_encrypt(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen); static int hpre_evp_decrypt(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen); static int hpre_evp_sign(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen); static int hpre_evp_verify(EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen); RSA_METHOD *hpre_get_rsa_methods(void) { int ret = 1; if (g_hpre_rsa_method != NULL) { return g_hpre_rsa_method; } if (g_soft_rsa_method != NULL) { return g_soft_rsa_method; } if (!kae_get_device(g_hpre_device)) { const RSA_METHOD *default_soft_method = RSA_PKCS1_OpenSSL(); g_soft_rsa_method = RSA_meth_new("SOFT RSA METHOD", 0); ret &= RSA_meth_set_pub_enc(g_soft_rsa_method, RSA_meth_get_pub_enc(default_soft_method)); ret &= RSA_meth_set_priv_enc(g_soft_rsa_method, RSA_meth_get_priv_enc(default_soft_method)); ret &= RSA_meth_set_pub_dec(g_soft_rsa_method, RSA_meth_get_pub_dec(default_soft_method)); ret &= RSA_meth_set_priv_dec(g_soft_rsa_method, RSA_meth_get_priv_dec(default_soft_method)); ret &= RSA_meth_set_keygen(g_soft_rsa_method, hpre_rsa_soft_genkey); ret &= RSA_meth_set_mod_exp(g_soft_rsa_method, RSA_meth_get_mod_exp(default_soft_method)); ret &= RSA_meth_set_bn_mod_exp(g_soft_rsa_method, RSA_meth_get_bn_mod_exp(default_soft_method)); if (ret == 0) { US_ERR("Failed to set SOFT RSA methods"); return NULL; } return g_soft_rsa_method; } g_hpre_rsa_method = RSA_meth_new("HPRE RSA method", 0); if (g_hpre_rsa_method == NULL) { KAEerr(KAE_F_HPRE_GET_RSA_METHODS, KAE_R_MALLOC_FAILURE); US_ERR("Failed to allocate HPRE RSA methods"); return NULL; } ret &= RSA_meth_set_pub_enc(g_hpre_rsa_method, hpre_rsa_public_encrypt); ret &= RSA_meth_set_pub_dec(g_hpre_rsa_method, hpre_rsa_public_decrypt); ret &= RSA_meth_set_priv_enc(g_hpre_rsa_method, hpre_rsa_private_encrypt); ret &= RSA_meth_set_priv_dec(g_hpre_rsa_method, hpre_rsa_private_decrypt); ret &= RSA_meth_set_keygen(g_hpre_rsa_method, hpre_rsa_keygen); ret &= RSA_meth_set_mod_exp(g_hpre_rsa_method, hpre_rsa_mod_exp); ret &= RSA_meth_set_bn_mod_exp(g_hpre_rsa_method, hpre_bn_mod_exp); if (ret == 0) { 
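        /*
         * Note (a sketch, not part of the original KAE sources): each
         * RSA_meth_set_* call above returns 1 on success and 0 on failure,
         * and the results are accumulated with "ret &= ...", so reaching
         * this branch means at least one setter failed.  The same pattern
         * in isolation (names below are illustrative only):
         *
         *     int ok = 1;
         *     ok &= RSA_meth_set_pub_enc(meth, my_pub_enc);
         *     ok &= RSA_meth_set_priv_dec(meth, my_priv_dec);
         *     if (ok == 0) {
         *         // report the failure once, release meth, fall back
         *     }
         */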
KAEerr(KAE_F_HPRE_GET_RSA_METHODS, KAE_R_RSA_SET_METHODS_FAILURE); US_ERR("Failed to set HPRE RSA methods"); return NULL; } return g_hpre_rsa_method; } static void hpre_free_rsa_methods(void) { if (g_hpre_rsa_method != NULL) { RSA_meth_free(g_hpre_rsa_method); g_hpre_rsa_method = NULL; } if (g_soft_rsa_method != NULL) { RSA_meth_free(g_soft_rsa_method); g_soft_rsa_method = NULL; } } int hpre_engine_ctx_poll(void* engine_ctx) { int ret; hpre_engine_ctx_t *eng_ctx = (hpre_engine_ctx_t *)engine_ctx; struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; poll_again: ret = wcrypto_rsa_poll(q, 1); if (!ret) { goto poll_again; } else if (ret < 0) { US_ERR("rsa poll fail!\n"); return ret; } return ret; } int hpre_module_init() { /* init queue */ wd_hpre_init_qnode_pool(); /* register async poll func */ async_register_poll_fn(ASYNC_TASK_RSA, hpre_engine_ctx_poll); return 1; } EVP_PKEY_METHOD *get_rsa_pkey_meth(void) { const EVP_PKEY_METHOD *def_rsa = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); if (g_hpre_pkey_meth == NULL) { g_hpre_pkey_meth = EVP_PKEY_meth_new(EVP_PKEY_RSA, 0); if (g_hpre_pkey_meth == NULL) { US_ERR("failed to new pkey meth"); return NULL; } } EVP_PKEY_meth_copy(g_hpre_pkey_meth, def_rsa); EVP_PKEY_meth_set_encrypt(g_hpre_pkey_meth, 0, hpre_evp_encrypt); EVP_PKEY_meth_set_decrypt(g_hpre_pkey_meth, 0, hpre_evp_decrypt); EVP_PKEY_meth_set_sign(g_hpre_pkey_meth, 0, hpre_evp_sign); EVP_PKEY_meth_set_verify(g_hpre_pkey_meth, 0, hpre_evp_verify); return g_hpre_pkey_meth; } void hpre_destroy() { hpre_free_rsa_methods(); } /* * Description: Update RsaMethod (not generated by the hardware engine for incoming rsa keys) * @param rsa_default ctx incoming RSA key * @return RSA* RSA key with hardware method * note:The RSA_set_method is not directly used here because rsa_default may be referenced elsewhere. 
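 *       Illustrative sketch of what the function body below does instead
 *       (simplified; the real code also duplicates p, q and the CRT
 *       parameters and checks the allocations):
 *           RSA *rsa = RSA_new();
 *           RSA_set_method(rsa, hw_rsa);
 *           RSA_set0_key(rsa, BN_dup(n), BN_dup(e), BN_dup(d));
 *       so rsa_default and its original method stay untouched.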
* Set will call free and then init causes failure */ static RSA *change_rsa_method(RSA *rsa_default) { RSA_METHOD* hw_rsa = hpre_get_rsa_methods(); RSA *rsa = RSA_new(); const BIGNUM *e, *p, *q, *n, *d, *dmp1, *dmq1, *iqmp; BIGNUM *e1, *p1, *q1, *n1, *d1, *dmp11, *dmq11, *iqmp1; RSA_get0_key(rsa_default, &n, &e, &d); RSA_get0_factors(rsa_default, &p, &q); RSA_get0_crt_params(rsa_default, &dmp1, &dmq1, &iqmp); e1 = BN_dup(e); p1 = BN_dup(p); q1 = BN_dup(q); n1 = BN_dup(n); d1 = BN_dup(d); dmp11 = BN_dup(dmp1); dmq11 = BN_dup(dmq1); iqmp1 = BN_dup(iqmp); if (rsa != NULL) { RSA_set_method(rsa, hw_rsa); RSA_set0_key(rsa, n1, e1, d1); RSA_set0_factors(rsa, p1, q1); RSA_set0_crt_params(rsa, dmp11, dmq11, iqmp1); return rsa; } else { KAEerr(KAE_F_CHANGRSAMETHOD, KAE_R_MALLOC_FAILURE); US_ERR("changRsaMethod failed."); return (RSA *)NULL; } } static int hpre_evp_encrypt(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen) { int (*pencryptfn)(EVP_PKEY_CTX *, unsigned char *, size_t *, const unsigned char *, size_t); EVP_PKEY *pk = EVP_PKEY_CTX_get0_pkey(ctx); RSA *rsa_default = EVP_PKEY_get1_RSA(pk); RSA *rsa = change_rsa_method(rsa_default); EVP_PKEY_set1_RSA(pk, rsa); const EVP_PKEY_METHOD *def_rsa_meth = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); EVP_PKEY_meth_get_encrypt (def_rsa_meth, (int(**)(EVP_PKEY_CTX *))NULL, &pencryptfn); int ret = pencryptfn(ctx, sig, siglen, tbs, tbslen); EVP_PKEY_assign_RSA(pk, rsa_default); RSA_free(rsa); return ret; } static int hpre_evp_decrypt(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen) { int (*pdecrypt)(EVP_PKEY_CTX *, unsigned char *, size_t *, const unsigned char *, size_t); EVP_PKEY *pk = EVP_PKEY_CTX_get0_pkey(ctx); RSA *rsa_default = EVP_PKEY_get1_RSA(pk); RSA *rsa = change_rsa_method(rsa_default); EVP_PKEY_set1_RSA(pk, rsa); const EVP_PKEY_METHOD *def_rsa_meth = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); EVP_PKEY_meth_get_decrypt (def_rsa_meth, (int(**)(EVP_PKEY_CTX *))NULL, &pdecrypt); int ret = pdecrypt(ctx, out, outlen, in, inlen); EVP_PKEY_assign_RSA(pk, rsa_default); RSA_free(rsa); return ret; } static int hpre_evp_sign(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen) { int (*psign)(EVP_PKEY_CTX *, unsigned char *, size_t *, const unsigned char *, size_t); EVP_PKEY *pk = EVP_PKEY_CTX_get0_pkey(ctx); RSA *rsa_default = EVP_PKEY_get1_RSA(pk); RSA *rsa = change_rsa_method(rsa_default); EVP_PKEY_set1_RSA(pk, rsa); const EVP_PKEY_METHOD *def_rsa_meth = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); EVP_PKEY_meth_get_sign (def_rsa_meth, (int(**)(EVP_PKEY_CTX *))NULL, &psign); int ret = psign(ctx, sig, siglen, tbs, tbslen); EVP_PKEY_assign_RSA(pk, rsa_default); RSA_free(rsa); return ret; } static int hpre_evp_verify(EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen) { int (*pverify)(EVP_PKEY_CTX *, const unsigned char *, size_t, const unsigned char *, size_t); EVP_PKEY *pk = EVP_PKEY_CTX_get0_pkey(ctx); RSA *rsa_default = EVP_PKEY_get1_RSA(pk); RSA *rsa = change_rsa_method(rsa_default); EVP_PKEY_set1_RSA(pk, rsa); const EVP_PKEY_METHOD *def_rsa_meth = EVP_PKEY_meth_get0(RSAPKEYMETH_IDX); EVP_PKEY_meth_get_verify (def_rsa_meth, (int(**)(EVP_PKEY_CTX *))NULL, &pverify); int ret = pverify(ctx, sig, siglen, tbs, tbslen); EVP_PKEY_assign_RSA(pk, rsa_default); RSA_free(rsa); return ret; } //lint -save -e506 #undef GOTOEND_IF #define GOTOEND_IF(cond, mesg, f, r) \ if (cond) { \ KAEerr(f, r); \ 
US_ERR(mesg); \ ret = HPRE_CRYPTO_FAIL; \ rsa_soft_mark = 1; \ goto end;\ } \ static int hpre_rsa_check(const int flen, const BIGNUM *n, const BIGNUM *e, int *num_bytes, RSA *rsa) { int key_bits; if (n == NULL || e == NULL) { return HPRE_CRYPTO_FAIL; } if (check_pubkey_param(n, e) != HPRE_CRYPTO_SUCC) { return HPRE_CRYPTO_FAIL; } *num_bytes = BN_num_bytes(n); if (flen > *num_bytes) { KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_DATA_GREATER_THEN_MOD_LEN); US_WARN("data length is large than num bytes of rsa->n"); return HPRE_CRYPTO_FAIL; } key_bits = RSA_bits(rsa); if (!check_bit_useful(key_bits)) { US_WARN("op sizes not supported by hpre engine then back to soft!"); return HPRE_CRYPTO_FAIL; } return HPRE_CRYPTO_SUCC; } static int hpre_rsa_prepare_opdata(const BIGNUM *n, int flen, const unsigned char *from, BN_CTX **bn_ctx, BIGNUM **bn_ret, BIGNUM **f_ret) { BN_CTX *bn_ctx_tmp; BIGNUM *bn_ret_tmp = NULL; BIGNUM *f = NULL; bn_ctx_tmp = BN_CTX_new(); if (bn_ctx_tmp == NULL) { KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_MALLOC_FAILURE); US_ERR("fail to new BN_CTX."); return HPRE_CRYPTO_SOFT; } BN_CTX_start(bn_ctx_tmp); bn_ret_tmp = BN_CTX_get(bn_ctx_tmp); f = BN_CTX_get(bn_ctx_tmp); if (bn_ret_tmp == NULL || f == NULL) { KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_MALLOC_FAILURE); US_ERR("fail to get BN_CTX."); return HPRE_CRYPTO_SOFT; } if (BN_bin2bn(from, flen, f) == NULL) { KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_ERR_LIB_BN); US_ERR("fail to bin2bn"); return HPRE_CRYPTO_SOFT; } if (BN_ucmp(f, n) >= 0) { KAEerr(KAE_F_HPRE_RSA_PUBDEC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); US_ERR("data is too large"); return HPRE_CRYPTO_SOFT; } *bn_ctx = bn_ctx_tmp; *bn_ret = bn_ret_tmp; *f_ret = f; return HPRE_CRYPTO_SUCC; } static int hpre_rsa_public_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) { int rsa_soft_mark = 0; const BIGNUM *n = NULL; const BIGNUM *e = NULL; const BIGNUM *d = NULL; BIGNUM *ret_bn = NULL; hpre_engine_ctx_t *eng_ctx = NULL; unsigned char *in_buf = NULL; if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) { return HPRE_CRYPTO_FAIL; } int key_bits = RSA_bits(rsa); if (!check_bit_useful(key_bits)) { US_WARN("op sizes not supported by hpre engine then back to soft!"); return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_ENC); } eng_ctx = hpre_get_eng_ctx(rsa, 0); if (eng_ctx == NULL) { US_WARN("get eng ctx fail then switch to soft!"); rsa_soft_mark = 1; goto end_soft; } RSA_get0_key(rsa, &n, &e, &d); int ret = check_pubkey_param(n, e); GOTOEND_IF(ret != HPRE_CRYPTO_SUCC, "check public key fail", KAE_F_HPRE_RSA_PUBENC, KAE_R_PUBLIC_KEY_INVALID); BN_CTX *bn_ctx = BN_CTX_new(); GOTOEND_IF(bn_ctx == NULL, "bn_ctx MALLOC FAILED!", KAE_F_HPRE_RSA_PUBENC, KAE_R_MALLOC_FAILURE); BN_CTX_start(bn_ctx); ret_bn = BN_CTX_get(bn_ctx); int num_bytes = BN_num_bytes(n); in_buf = (unsigned char *)OPENSSL_malloc(num_bytes); GOTOEND_IF(ret_bn == NULL || in_buf == NULL, "PUBLIC_ENCRYPT RSA MALLOC FAILED!", KAE_F_HPRE_RSA_PUBENC, KAE_R_MALLOC_FAILURE); ret = hpre_rsa_padding(flen, from, in_buf, num_bytes, padding, PUB_ENC); GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "RSA PADDING FAILED", KAE_F_HPRE_RSA_PUBENC, KAE_R_RSA_PADDING_FAILURE); hpre_rsa_fill_pubkey(e, n, eng_ctx); eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; eng_ctx->opdata.op_type = WCRYPTO_RSA_VERIFY; eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); eng_ctx->opdata.out = 
eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); kae_memcpy(eng_ctx->opdata.in, in_buf, eng_ctx->opdata.in_bytes); ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); GOTOEND_IF(HPRE_CRYPTO_FAIL == ret, "hpre rsa pub encrypt failed!", KAE_F_HPRE_RSA_PUBENC, KAE_R_PUBLIC_ENCRYPTO_FAILURE); BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, ret_bn); ret = BN_bn2binpad(ret_bn, to, num_bytes); US_DEBUG("hpre rsa public encrypt success!"); end: hpre_free_bn_ctx_buf(bn_ctx, in_buf, num_bytes); hpre_free_eng_ctx(eng_ctx); end_soft: if (rsa_soft_mark == 1) { ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_ENC); } return ret; } static int hpre_rsa_private_encrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) { int ret = HPRE_CRYPTO_FAIL; int rsa_soft_mark = 0; hpre_engine_ctx_t *eng_ctx = NULL; BIGNUM *f = (BIGNUM *)NULL; BIGNUM *bn_ret = (BIGNUM *)NULL; BIGNUM *res = (BIGNUM *)NULL; const BIGNUM *n = (const BIGNUM *)NULL; const BIGNUM *e = (const BIGNUM *)NULL; const BIGNUM *d = (const BIGNUM *)NULL; const BIGNUM *p = (const BIGNUM *)NULL; const BIGNUM *q = (const BIGNUM *)NULL; const BIGNUM *dmp1 = (const BIGNUM *)NULL; const BIGNUM *dmq1 = (const BIGNUM *)NULL; const BIGNUM *iqmp = (const BIGNUM *)NULL; unsigned char *in_buf = (unsigned char *)NULL; if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) { return HPRE_CRYPTO_FAIL; } int key_bits = RSA_bits(rsa); if (!check_bit_useful(key_bits)) { US_WARN("op sizes not supported by hpre engine then back to soft!"); return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_ENC); } eng_ctx = hpre_get_eng_ctx(rsa, 0); if (eng_ctx == NULL) { US_WARN("get eng ctx fail then switch to soft!"); rsa_soft_mark = 1; goto end_soft; } BN_CTX *bn_ctx = BN_CTX_new(); GOTOEND_IF(bn_ctx == NULL, "PRI_ENC MALLOC_FAILURE ", KAE_F_HPRE_RSA_PRIENC, KAE_R_MALLOC_FAILURE); BN_CTX_start(bn_ctx); f = BN_CTX_get(bn_ctx); bn_ret = BN_CTX_get(bn_ctx); RSA_get0_factors(rsa, &p, &q); RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); int version = RSA_get_version(rsa); RSA_get0_key(rsa, &n, &e, &d); int num_bytes = BN_num_bytes(n); in_buf = (unsigned char *)OPENSSL_malloc(num_bytes); GOTOEND_IF(bn_ret == NULL || in_buf == NULL, "OpenSSL malloc failure", KAE_F_HPRE_RSA_PRIENC, KAE_R_MALLOC_FAILURE); ret = hpre_rsa_padding(flen, from, in_buf, num_bytes, padding, PRI_ENC); GOTOEND_IF(ret == HPRE_CRYPTO_FAIL, "RSA PADDING FAILED!", KAE_F_HPRE_RSA_PRIENC, KAE_R_RSA_PADDING_FAILURE); GOTOEND_IF(NULL == BN_bin2bn(in_buf, num_bytes, f), "BN_bin2bn failure", KAE_F_HPRE_RSA_PRIENC, KAE_R_ERR_LIB_BN); ret = BN_ucmp(f, n); GOTOEND_IF(ret >= 0, "RSA PADDING FAILED!", KAE_F_HPRE_RSA_PRIENC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); hpre_rsa_fill_pubkey(e, n, eng_ctx); hpre_rsa_fill_prikey(rsa, eng_ctx, version, p, q, dmp1, dmq1, iqmp); eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; eng_ctx->opdata.op_type = WCRYPTO_RSA_SIGN; eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); kae_memcpy(eng_ctx->opdata.in, in_buf, eng_ctx->opdata.in_bytes); ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); if (ret == HPRE_CRYPTO_FAIL) { US_WARN("hpre rsa priv encrypt failed!"); rsa_soft_mark = 1; goto end; } BN_bin2bn((const unsigned char 
*)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); if (hpre_get_prienc_res(padding, f, n, bn_ret, &res) == HPRE_CRYPTO_FAIL) { goto end; } ret = BN_bn2binpad(res, to, num_bytes); US_DEBUG("hpre rsa priv encrypt success!"); end: hpre_free_bn_ctx_buf(bn_ctx, in_buf, num_bytes); hpre_free_eng_ctx(eng_ctx); end_soft: if (rsa_soft_mark == 1) { ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_ENC); } return ret; } static int hpre_rsa_public_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) { hpre_engine_ctx_t *eng_ctx = NULL; BIGNUM *bn_ret = NULL; BIGNUM *f = NULL; BN_CTX *bn_ctx = NULL; const BIGNUM *n = NULL; const BIGNUM *e = NULL; const BIGNUM *d = NULL; int num_bytes = 0; int rsa_soft_mark = 0; unsigned char *buf = NULL; if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) { return HPRE_CRYPTO_FAIL; } RSA_get0_key(rsa, &n, &e, &d); int ret = hpre_rsa_check(flen, n, e, &num_bytes, rsa); if (ret == HPRE_CRYPTO_FAIL) { rsa_soft_mark = 1; goto end_soft; } eng_ctx = hpre_get_eng_ctx(rsa, 0); if (eng_ctx == NULL) { US_WARN("get eng ctx fail then switch to soft!"); rsa_soft_mark = 1; goto end_soft; } buf = (unsigned char *)OPENSSL_malloc(num_bytes); if (buf == NULL) { rsa_soft_mark = 1; goto end; } ret = hpre_rsa_prepare_opdata(n, flen, from, &bn_ctx, &bn_ret, &f); if (ret == HPRE_CRYPTO_SOFT) { rsa_soft_mark = 1; goto end; } hpre_rsa_fill_pubkey(e, n, eng_ctx); eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; eng_ctx->opdata.op_type = WCRYPTO_RSA_VERIFY; eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); kae_memcpy(eng_ctx->opdata.in, from, eng_ctx->opdata.in_bytes); ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); GOTOEND_IF(HPRE_CRYPTO_FAIL == ret, "hpre rsa pub decrypt failed!", KAE_F_HPRE_RSA_PUBDEC, KAE_R_PUBLIC_DECRYPTO_FAILURE); BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); if ((padding == RSA_X931_PADDING) && ((bn_get_words(bn_ret)[0] & 0xf) != 12)) { // not 12 then BN_sub GOTOEND_IF(!BN_sub(bn_ret, n, bn_ret), "BN_sub failed", KAE_F_HPRE_RSA_PUBDEC, KAE_R_ERR_LIB_BN); } int len = BN_bn2binpad(bn_ret, buf, num_bytes); ret = check_rsa_padding(to, num_bytes, buf, len, padding, PUB_DEC); if (ret == HPRE_CRYPTO_FAIL) { US_WARN("hpre rsa check padding failed.switch to soft"); rsa_soft_mark = 1; goto end; } US_DEBUG("hpre rsa public decrypt success!"); end: hpre_free_bn_ctx_buf(bn_ctx, buf, num_bytes); hpre_free_eng_ctx(eng_ctx); end_soft: if (rsa_soft_mark == 1) { ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PUB_DEC); } return ret; } static int hpre_rsa_private_decrypt(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding) { int ret = HPRE_CRYPTO_FAIL; const BIGNUM *n = (const BIGNUM *)NULL; const BIGNUM *e = (const BIGNUM *)NULL; const BIGNUM *d = (const BIGNUM *)NULL; const BIGNUM *p = (const BIGNUM *)NULL; const BIGNUM *q = (const BIGNUM *)NULL; const BIGNUM *dmp1 = (const BIGNUM *)NULL; const BIGNUM *dmq1 = (const BIGNUM *)NULL; const BIGNUM *iqmp = (const BIGNUM *)NULL; BIGNUM *f = (BIGNUM *)NULL; BIGNUM *bn_ret = (BIGNUM *)NULL; int len; int rsa_soft_mark = 0; unsigned char *buf = (unsigned char *)NULL; BN_CTX *bn_ctx = NULL; if (hpre_rsa_check_para(flen, from, to, rsa) != HPRE_CRYPTO_SUCC) { return HPRE_CRYPTO_FAIL; } 
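    /*
     * Hardware private-decrypt flow:
     *   1. read n/e/d and reject inputs longer than the modulus;
     *   2. fall back to the OpenSSL software path (hpre_rsa_soft_calc with
     *      PRI_DEC) when the key size is unsupported or no hardware
     *      queue/context can be obtained;
     *   3. otherwise fill the public and private key into the wd context,
     *      run a WCRYPTO_RSA_SIGN operation and strip the padding with
     *      check_rsa_padding().
     */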
RSA_get0_key(rsa, &n, &e, &d); int num_bytes = BN_num_bytes(n); if (flen > num_bytes) { KAEerr(KAE_F_HPRE_RSA_PRIDEC, KAE_R_DATA_GREATER_THEN_MOD_LEN); US_ERR("PRIVATE_DECRYPT DATA_GREATER_THAN_MOD_LEN"); return HPRE_CRYPTO_FAIL; } int key_bits = RSA_bits(rsa); if (!check_bit_useful(key_bits)) { US_WARN("op sizes not supported by hpre engine then back to soft!"); return hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_DEC); } hpre_engine_ctx_t *eng_ctx = hpre_get_eng_ctx(rsa, 0); if (eng_ctx == NULL) { US_WARN("get eng ctx fail then switch to soft!"); rsa_soft_mark = 1; goto end_soft; } bn_ctx = BN_CTX_new(); GOTOEND_IF(bn_ctx == NULL, "bn_ctx MALLOC FAILED!", KAE_F_HPRE_RSA_PRIDEC, KAE_R_ERR_LIB_BN); BN_CTX_start(bn_ctx); f = BN_CTX_get(bn_ctx); bn_ret = BN_CTX_get(bn_ctx); RSA_get0_factors(rsa, &p, &q); RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); int version = RSA_get_version(rsa); buf = (unsigned char *)OPENSSL_malloc(num_bytes); GOTOEND_IF(bn_ret == NULL || buf == NULL, "PRIVATE_DECRYPT ERR_R_MALLOC_FAILURE", KAE_F_HPRE_RSA_PRIDEC, KAE_R_MALLOC_FAILURE); GOTOEND_IF(BN_bin2bn(from, (int) flen, f) == NULL, "BN_bin2bn failure", KAE_F_HPRE_RSA_PRIDEC, KAE_R_ERR_LIB_BN); GOTOEND_IF(BN_ucmp(f, n) >= 0, "PRIVATE_DECRYPT, RSA_R_DATA_TOO_LARGE_FOR_MODULUS", KAE_F_HPRE_RSA_PRIDEC, KAE_R_DATA_TOO_LARGE_FOR_MODULUS); hpre_rsa_fill_pubkey(e, n, eng_ctx); hpre_rsa_fill_prikey(rsa, eng_ctx, version, p, q, dmp1, dmq1, iqmp); eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; eng_ctx->opdata.op_type = WCRYPTO_RSA_SIGN; eng_ctx->opdata.in = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); eng_ctx->opdata.out = eng_ctx->rsa_setup.br.alloc(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->qlist->kae_queue_mem_pool->block_size); kae_memcpy(eng_ctx->opdata.in, from, eng_ctx->opdata.in_bytes); ret = hpre_rsa_crypto(eng_ctx, &eng_ctx->opdata); if (ret == HPRE_CRYPTO_FAIL) { US_WARN("hpre rsa priv decrypt failed.switch to soft"); rsa_soft_mark = 1; goto end; } BN_bin2bn((const unsigned char *)eng_ctx->opdata.out, eng_ctx->opdata.out_bytes, bn_ret); len = BN_bn2binpad(bn_ret, buf, num_bytes); ret = check_rsa_padding(to, num_bytes, buf, len, padding, PRI_DEC); if (ret == HPRE_CRYPTO_FAIL) { US_WARN("hpre rsa check padding failed.switch to soft"); rsa_soft_mark = 1; goto end; } US_DEBUG("hpre rsa priv decrypt success!"); end: hpre_free_bn_ctx_buf(bn_ctx, buf, num_bytes); hpre_free_eng_ctx(eng_ctx); end_soft: if (rsa_soft_mark == 1) { ret = hpre_rsa_soft_calc(flen, from, to, rsa, padding, PRI_DEC); } return ret; } static int hpre_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb) { int ret = HPRE_CRYPTO_FAIL; int rsa_soft_mark = 0; struct wcrypto_rsa_pubkey *pubkey = NULL; struct wcrypto_rsa_prikey *prikey = NULL; struct wd_dtb *wd_e = NULL; struct wd_dtb *wd_p = NULL; struct wd_dtb *wd_q = NULL; if (bits < RSA_MIN_MODULUS_BITS) { KAEerr(KAE_F_HPRE_RSA_KEYGEN, KAE_R_RSA_KEY_SIZE_TOO_SMALL); US_ERR("RSA_BUILTIN_KEYGEN RSA_R_KEY_SIZE_TOO_SMALL"); return HPRE_CRYPTO_FAIL; } if (!check_bit_useful(bits)) { US_WARN("op sizes not supported by hpre engine then back to soft!"); return hpre_rsa_soft_genkey(rsa, bits, e, cb); } hpre_engine_ctx_t *eng_ctx = hpre_get_eng_ctx(rsa, bits); if (eng_ctx == NULL) { US_WARN("get eng ctx fail then switch to soft!"); rsa_soft_mark = 1; goto end_soft; } BIGNUM *e_value = BN_new(); BIGNUM *p = BN_new(); BIGNUM *q = BN_new(); GOTOEND_IF(e_value == NULL || p == NULL || q == NULL, "e_value or p or q MALLOC FAILED.", 
KAE_F_HPRE_RSA_KEYGEN, KAE_R_ERR_LIB_BN); GOTOEND_IF(hpre_rsa_primegen(bits, e, p, q, cb) == OPENSSL_FAIL, "hisi_rsa_primegen failed", KAE_F_HPRE_RSA_KEYGEN, KAE_R_GET_PRIMEKEY_FAILURE); GOTOEND_IF(BN_copy(e_value, e) == NULL, "copy e failed", KAE_F_HPRE_RSA_KEYGEN, KAE_R_ERR_LIB_BN); wcrypto_get_rsa_pubkey(eng_ctx->ctx, &pubkey); wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, NULL); wd_e->dsize = BN_bn2bin(e_value, (unsigned char *)wd_e->data); wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); wcrypto_get_rsa_crt_prikey_params(prikey, NULL, NULL, NULL, &wd_q, &wd_p); wd_q->dsize = BN_bn2bin(q, (unsigned char *)wd_q->data); wd_p->dsize = BN_bn2bin(p, (unsigned char *)wd_p->data); eng_ctx->opdata.in_bytes = eng_ctx->priv_ctx.key_size; eng_ctx->opdata.op_type = WCRYPTO_RSA_GENKEY; ret = hpre_fill_keygen_opdata(eng_ctx->ctx, &eng_ctx->opdata); if (ret != KAE_SUCCESS) { US_WARN("hpre_fill_keygen_opdata failed"); rsa_soft_mark = 1; goto end; } ret = hpre_rsa_sync(eng_ctx->ctx, &eng_ctx->opdata); if (ret == HPRE_CRYPTO_FAIL) { US_WARN("hpre generate rsa key failed.switch to soft"); rsa_soft_mark = 1; goto end; } ret = hpre_rsa_get_keygen_param(&eng_ctx->opdata, eng_ctx->ctx, rsa, e_value, p, q); US_DEBUG("hpre rsa keygen success!"); end: hpre_free_eng_ctx(eng_ctx); end_soft: if (rsa_soft_mark == 1) { ret = hpre_rsa_soft_genkey(rsa, bits, e, cb); } return ret; } static int hpre_rsa_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { US_DEBUG("- Started\n"); return RSA_meth_get_mod_exp(RSA_PKCS1_OpenSSL()) (r0, I, rsa, ctx); } static int hpre_bn_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx) { US_DEBUG("- Started\n"); return RSA_meth_get_bn_mod_exp(RSA_PKCS1_OpenSSL()) (r, a, p, m, ctx, m_ctx); } KAE/alg/pkey/hpre_wd.c0000644060212406010010000003364513616500010011705 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE rsa using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include <stdio.h> #include <string.h> #include <fcntl.h> #include <sched.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/syscall.h> #include <sys/stat.h> #include <sys/time.h> #include <unistd.h> #include <semaphore.h> #include <openssl/rsa.h> #include <openssl/evp.h> #include <openssl/engine.h> #include <openssl/err.h> #include <openssl/async.h> #include "hpre_wd.h" #include "wd_rsa.h" #include "async_callback.h" #include "async_task_queue.h" #include "async_event.h" #include "wd_queue_memory.h" #include "engine_types.h" #include "utils/engine_check.h" static void hpre_rsa_cb(const void *message, void *tag); KAE_QUEUE_POOL_HEAD_S *g_hpre_rsa_qnode_pool = NULL; int wd_hpre_init_qnode_pool() { kae_queue_pool_destroy(g_hpre_rsa_qnode_pool, NULL); g_hpre_rsa_qnode_pool = kae_init_queue_pool(WCRYPTO_RSA); if (g_hpre_rsa_qnode_pool == NULL) { WD_ERR("hpre rsa qnode poll init fail!\n"); return KAE_FAIL; } return KAE_SUCCESS; } KAE_QUEUE_POOL_HEAD_S *wd_hpre_get_qnode_pool() { return g_hpre_rsa_qnode_pool; } static hpre_engine_ctx_t *hpre_new_eng_ctx(RSA *rsa_alg) { hpre_engine_ctx_t *eng_ctx = NULL; eng_ctx = (hpre_engine_ctx_t *)OPENSSL_malloc(sizeof(hpre_engine_ctx_t)); if (eng_ctx == NULL) { US_ERR("hpre engine_ctx malloc fail"); return NULL; } kae_memset(eng_ctx, 0, sizeof(hpre_engine_ctx_t)); eng_ctx->priv_ctx.ssl_alg = rsa_alg; eng_ctx->qlist = kae_get_node_from_pool(g_hpre_rsa_qnode_pool); if (eng_ctx->qlist == NULL) { US_ERR_LIMIT("error. get hardware queue failed"); OPENSSL_free(eng_ctx); eng_ctx = NULL; return NULL; } eng_ctx->priv_ctx.is_privkey_ready = UNSET; eng_ctx->priv_ctx.is_pubkey_ready = UNSET; return eng_ctx; } static int hpre_init_eng_ctx(hpre_engine_ctx_t *eng_ctx, int bits) { struct wd_queue *q = eng_ctx->qlist->kae_wd_queue; struct wd_queue_mempool *pool = eng_ctx->qlist->kae_queue_mem_pool; // this is for ctx is in use.we dont need to re create ctx->ctx again if (eng_ctx->ctx && eng_ctx->opdata.in) { kae_memset(eng_ctx->opdata.in, 0, eng_ctx->opdata.in_bytes); return OPENSSL_SUCCESS; } if (eng_ctx->ctx == NULL) { if (bits == 0) { eng_ctx->priv_ctx.key_size = RSA_size(eng_ctx->priv_ctx.ssl_alg); } else { eng_ctx->priv_ctx.key_size = bits >> BIT_BYTES_SHIFT; } eng_ctx->rsa_setup.key_bits = eng_ctx->priv_ctx.key_size << BIT_BYTES_SHIFT; eng_ctx->rsa_setup.is_crt = ISSET; eng_ctx->rsa_setup.cb = (wcrypto_cb)hpre_rsa_cb; eng_ctx->rsa_setup.br.alloc = kae_wd_alloc_blk; eng_ctx->rsa_setup.br.free = kae_wd_free_blk; eng_ctx->rsa_setup.br.iova_map = kae_dma_map; eng_ctx->rsa_setup.br.iova_unmap = kae_dma_unmap; eng_ctx->rsa_setup.br.usr = pool; eng_ctx->ctx = wcrypto_create_rsa_ctx(q, &eng_ctx->rsa_setup); if (eng_ctx->ctx == NULL) { US_ERR("create rsa ctx fail!"); return OPENSSL_FAIL; } } return OPENSSL_SUCCESS; } hpre_engine_ctx_t *hpre_get_eng_ctx(RSA *rsa, int bits) { hpre_engine_ctx_t *eng_ctx = hpre_new_eng_ctx(rsa); if (eng_ctx == NULL) { US_WARN("new eng ctx fail then switch to soft!"); return NULL; } if (hpre_init_eng_ctx(eng_ctx, bits) == 0) { hpre_free_eng_ctx(eng_ctx); US_WARN("init eng ctx fail then switch to soft!"); return NULL; } return eng_ctx; } void hpre_free_eng_ctx(hpre_engine_ctx_t *eng_ctx) { US_DEBUG("hpre rsa free engine ctx start!"); if (eng_ctx == NULL) { US_DEBUG("no eng_ctx to free"); return; } if (eng_ctx->qlist != NULL) { hpre_free_rsa_ctx(eng_ctx->ctx); kae_put_node_to_pool(g_hpre_rsa_qnode_pool, eng_ctx->qlist); } if (eng_ctx->opdata.op_type != WCRYPTO_RSA_GENKEY) { if (eng_ctx->opdata.in) { 
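            /*
             * These in/out buffers were obtained from the queue memory pool
             * via rsa_setup.br.alloc() in the encrypt/decrypt paths above, so
             * they are returned through the matching br.free() here; the
             * WCRYPTO_RSA_GENKEY buffers are released in the else branch with
             * wcrypto_del_kg_in()/wcrypto_del_kg_out() instead.
             */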
eng_ctx->rsa_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->opdata.in); } if (eng_ctx->opdata.out) { if (eng_ctx->qlist != NULL) { eng_ctx->rsa_setup.br.free(eng_ctx->qlist->kae_queue_mem_pool, eng_ctx->opdata.out); } } } else { if (eng_ctx->opdata.in) { wcrypto_del_kg_in(eng_ctx->ctx, (struct wcrypto_rsa_kg_in *)eng_ctx->opdata.in); } if (eng_ctx->opdata.out) { wcrypto_del_kg_out(eng_ctx->ctx, (struct wcrypto_rsa_kg_out *)eng_ctx->opdata.out); } } eng_ctx->priv_ctx.ssl_alg = NULL; eng_ctx->qlist = NULL; eng_ctx->ctx = NULL; eng_ctx->opdata.in = NULL; eng_ctx->opdata.out = NULL; eng_ctx->priv_ctx.is_privkey_ready = UNSET; eng_ctx->priv_ctx.is_pubkey_ready = UNSET; OPENSSL_free(eng_ctx); eng_ctx = NULL; return; } void hpre_free_rsa_ctx(void *ctx) { if (ctx != NULL) { wcrypto_del_rsa_ctx(ctx); ctx = NULL; } } void hpre_rsa_fill_pubkey(const BIGNUM *e, const BIGNUM *n, hpre_engine_ctx_t *eng_ctx) { struct wcrypto_rsa_pubkey *pubkey = NULL; struct wd_dtb *wd_e = NULL; struct wd_dtb *wd_n = NULL; wcrypto_get_rsa_pubkey(eng_ctx->ctx, &pubkey); wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, &wd_n); if (!eng_ctx->priv_ctx.is_pubkey_ready) { wd_e->dsize = BN_bn2bin(e, (unsigned char *)wd_e->data); wd_n->dsize = BN_bn2bin(n, (unsigned char *)wd_n->data); eng_ctx->priv_ctx.is_pubkey_ready = ISSET; } return; } /** * FILL prikey to rsa_ctx in normal mode * @param rsa get prikey from rsa * @param rsa_ctx */ static void hpre_rsa_fill_prikey1(RSA *rsa, hpre_engine_ctx_t *eng_ctx) { struct wcrypto_rsa_prikey *prikey = NULL; struct wd_dtb *wd_d = NULL; struct wd_dtb *wd_n = NULL; const BIGNUM *n = (const BIGNUM *)NULL; const BIGNUM *e = (const BIGNUM *)NULL; const BIGNUM *d = (const BIGNUM *)NULL; RSA_get0_key(rsa, &n, &e, &d); wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); wcrypto_get_rsa_prikey_params(prikey, &wd_d, &wd_n); if (!eng_ctx->priv_ctx.is_privkey_ready) { wd_d->dsize = BN_bn2bin(d, (unsigned char *)wd_d->data); wd_n->dsize = BN_bn2bin(n, (unsigned char *)wd_n->data); eng_ctx->priv_ctx.is_privkey_ready = ISSET; } return ; } /** * FILL prikey to rsa_ctx in crt mode * @param rsa get prikey from rsa * @param rsa_ctx */ static void hpre_rsa_fill_prikey2(RSA *rsa, hpre_engine_ctx_t *eng_ctx) { struct wcrypto_rsa_prikey *prikey = NULL; struct wd_dtb *wd_dq, *wd_dp, *wd_q, *wd_p, *wd_qinv; const BIGNUM *p = NULL; const BIGNUM *q = NULL; const BIGNUM *dmp1 = NULL; const BIGNUM *dmq1 = NULL; const BIGNUM *iqmp = NULL; RSA_get0_factors(rsa, &p, &q); RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); wcrypto_get_rsa_prikey(eng_ctx->ctx, &prikey); wcrypto_get_rsa_crt_prikey_params(prikey, &wd_dq, &wd_dp, &wd_qinv, &wd_q, &wd_p); if (!eng_ctx->priv_ctx.is_privkey_ready) { wd_dq->dsize = BN_bn2bin(dmq1, (unsigned char *)wd_dq->data); wd_dp->dsize = BN_bn2bin(dmp1, (unsigned char *)wd_dp->data); wd_q->dsize = BN_bn2bin(q, (unsigned char *)wd_q->data); wd_p->dsize = BN_bn2bin(p, (unsigned char *)wd_p->data); wd_qinv->dsize = BN_bn2bin(iqmp, (unsigned char *)wd_qinv->data); eng_ctx->priv_ctx.is_privkey_ready = ISSET; } return ; } void hpre_rsa_fill_prikey(RSA *rsa, hpre_engine_ctx_t *eng_ctx, int version, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp) { if (RSA_test_flags(rsa, RSA_FLAG_EXT_PKEY) || (version == RSA_ASN1_VERSION_MULTI) || ((p != NULL) && (q != NULL) && (dmp1 != NULL) && (dmq1 != NULL) && (iqmp != NULL))) { hpre_rsa_fill_prikey2(rsa, eng_ctx); } else { hpre_rsa_fill_prikey1(rsa, eng_ctx); } } int hpre_fill_keygen_opdata(void *ctx, struct 
wcrypto_rsa_op_data *opdata) { struct wd_dtb *wd_e = NULL; struct wd_dtb *wd_p = NULL; struct wd_dtb *wd_q = NULL; struct wcrypto_rsa_pubkey *pubkey = NULL; struct wcrypto_rsa_prikey *prikey = NULL; wcrypto_get_rsa_pubkey(ctx, &pubkey); wcrypto_get_rsa_pubkey_params(pubkey, &wd_e, NULL); wcrypto_get_rsa_prikey(ctx, &prikey); wcrypto_get_rsa_crt_prikey_params(prikey, NULL, NULL, NULL, &wd_q, &wd_p); opdata->in = wcrypto_new_kg_in(ctx, wd_e, wd_p, wd_q); if (!opdata->in) { US_ERR("create rsa kgen in fail!\n"); return -ENOMEM; } opdata->out = wcrypto_new_kg_out(ctx); if (!opdata->out) { wcrypto_del_kg_in(ctx, (struct wcrypto_rsa_kg_in *)opdata->in); US_ERR("create rsa kgen out fail\n"); return -ENOMEM; } return 0; } int hpre_rsa_get_keygen_param(struct wcrypto_rsa_op_data *opdata, void *ctx, RSA *rsa, BIGNUM *e_value, BIGNUM *p, BIGNUM *q) { BIGNUM *n = BN_new(); BIGNUM *d = BN_new(); BIGNUM *dmp1 = BN_new(); BIGNUM *dmq1 = BN_new(); BIGNUM *iqmp = BN_new(); struct wd_dtb wd_d; struct wd_dtb wd_n; struct wd_dtb wd_qinv; struct wd_dtb wd_dq; struct wd_dtb wd_dp; unsigned int key_bits, key_size; struct wcrypto_rsa_kg_out *out = (struct wcrypto_rsa_kg_out *)opdata->out; key_bits = wcrypto_rsa_key_bits(ctx); key_size = key_bits >> BIT_BYTES_SHIFT; wcrypto_get_rsa_kg_out_params(out, &wd_d, &wd_n); wcrypto_get_rsa_kg_out_crt_params(out, &wd_qinv, &wd_dq, &wd_dp); BN_bin2bn((unsigned char *)wd_d.data, key_size, d); BN_bin2bn((unsigned char *)wd_n.data, key_size, n); BN_bin2bn((unsigned char *)wd_qinv.data, wd_qinv.dsize, iqmp); BN_bin2bn((unsigned char *)wd_dq.data, wd_dq.dsize, dmq1); BN_bin2bn((unsigned char *)wd_dp.data, wd_dp.dsize, dmp1); if (!(RSA_set0_key(rsa, n, e_value, d) && RSA_set0_factors(rsa, p, q) && RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))) { KAEerr(KAE_F_RSA_FILL_KENGEN_PARAM, KAE_R_RSA_KEY_NOT_COMPELET); US_ERR("set key failed!"); return OPENSSL_FAIL; } else { return OPENSSL_SUCCESS; } } static void hpre_rsa_cb(const void *message, void *tag) { if (!message || !tag) { US_ERR("hpre cb params err!\n"); return; } struct wcrypto_rsa_msg *msg = (struct wcrypto_rsa_msg *)message; hpre_engine_ctx_t *eng_ctx = (hpre_engine_ctx_t *)tag; eng_ctx->opdata.out = msg->out; eng_ctx->opdata.out_bytes = msg->out_bytes; eng_ctx->opdata.status = msg->result; } int hpre_rsa_sync(void *ctx, struct wcrypto_rsa_op_data *opdata) { void *tag = NULL; if (!ctx || !opdata) { US_ERR("sync params err!"); return HPRE_CRYPTO_FAIL; } int ret = wcrypto_do_rsa(ctx, opdata, tag); if (ret != WD_SUCCESS) { US_ERR("hpre do rsa fail!"); return HPRE_CRYPTO_FAIL; } else { US_DEBUG("hpre do rsa success!"); return HPRE_CRYPTO_SUCC; } } int hpre_rsa_async(hpre_engine_ctx_t *eng_ctx, struct wcrypto_rsa_op_data *opdata, op_done_t *op_done) { int ret = 0; int cnt = 0; enum task_type type = ASYNC_TASK_RSA; void *tag = eng_ctx; do { if (cnt > MAX_SEND_TRY_CNTS) { break; } ret = wcrypto_do_rsa(eng_ctx->ctx, opdata, tag); if (ret == WD_STATUS_BUSY) { if ((async_wake_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || (async_pause_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0))) { US_ERR("hpre wake job or hpre pause job fail!"); ret = 0; break; } cnt++; } } while (ret == WD_STATUS_BUSY); if (ret != WD_SUCCESS) { return HPRE_CRYPTO_FAIL; } if (async_add_poll_task(eng_ctx, op_done, type) == 0) { return HPRE_CRYPTO_FAIL; } return HPRE_CRYPTO_SUCC; } int hpre_rsa_crypto(hpre_engine_ctx_t *eng_ctx, struct wcrypto_rsa_op_data *opdata) { int job_ret; op_done_t op_done; async_init_op_done(&op_done); if (op_done.job != NULL && 
kae_is_async_enabled()) { if (async_setup_async_event_notification(0) == 0) { US_ERR("hpre async event notifying failed"); async_cleanup_op_done(&op_done); return HPRE_CRYPTO_FAIL; } } else { US_DEBUG("hpre rsa no async Job or async disable, back to sync!"); async_cleanup_op_done(&op_done); return hpre_rsa_sync(eng_ctx->ctx, opdata); } if (hpre_rsa_async(eng_ctx, opdata, &op_done) == HPRE_CRYPTO_FAIL) goto err; do { job_ret = async_pause_job(op_done.job, ASYNC_STATUS_OK); if (job_ret == 0) { US_DEBUG("- pthread_yidle -"); kae_pthread_yield(); } } while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); if (op_done.verifyRst <= 0) { US_ERR("hpre rsa verify result failed with %d", op_done.verifyRst); async_cleanup_op_done(&op_done); return HPRE_CRYPTO_FAIL; } async_cleanup_op_done(&op_done); US_DEBUG("hpre rsa do async job success!"); return HPRE_CRYPTO_SUCC; err: US_ERR("hpre rsa do async job err"); (void)async_clear_async_event_notification(); async_cleanup_op_done(&op_done); return HPRE_CRYPTO_FAIL; } KAE/alg/pkey/hpre_wd.h0000644060212406010010000000470113616500010011701 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the rsa interface for KAE rsa using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_WD_H #define HPRE_WD_H #include <semaphore.h> #include "hpre_rsa.h" #include "wd_rsa.h" #include "../wdmngr/wd_queue_memory.h" #include "wd_rsa.h" #define UNSET 0 #define ISSET 1 #define BIT_BYTES_SHIFT 3 #define BN_ULONG unsigned long #define MAX_SEND_TRY_CNTS 50 #define MAX_RECV_TRY_CNTS 3000 #define RSA_BALANCE_TIMES 1280 #define WD_STATUS_BUSY (-EBUSY) struct hpre_priv_ctx { RSA *ssl_alg; int is_pubkey_ready; int is_privkey_ready; int key_size; }; typedef struct hpre_priv_ctx hpre_priv_ctx_t; struct hpre_engine_ctx { void *ctx; struct wcrypto_rsa_op_data opdata; struct wcrypto_rsa_ctx_setup rsa_setup; struct KAE_QUEUE_DATA_NODE *qlist; hpre_priv_ctx_t priv_ctx; }; typedef struct hpre_engine_ctx hpre_engine_ctx_t; int wd_hpre_init_qnode_pool(); KAE_QUEUE_POOL_HEAD_S *wd_hpre_get_qnode_pool(); hpre_engine_ctx_t *hpre_get_eng_ctx(RSA *rsa, int bits); void hpre_free_eng_ctx(hpre_engine_ctx_t *eng_ctx); void hpre_free_rsa_ctx(void *ctx); void hpre_rsa_fill_pubkey(const BIGNUM *e, const BIGNUM *n, hpre_engine_ctx_t *rsa_ctx); void hpre_rsa_fill_prikey(RSA *rsa, hpre_engine_ctx_t *eng_ctx, int version, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp); int hpre_fill_keygen_opdata(void *ctx, struct wcrypto_rsa_op_data *opdata); int hpre_rsa_get_keygen_param(struct wcrypto_rsa_op_data *opdata, void *ctx, RSA *rsa, BIGNUM *e_value, BIGNUM *p, BIGNUM *q); int hpre_rsa_sync(void *ctx, struct wcrypto_rsa_op_data *opdata); int hpre_rsa_crypto(hpre_engine_ctx_t *eng_ctx, struct wcrypto_rsa_op_data *opdata); #endif KAE/alg/pkey/hpre_rsa_utils.h0000644060212406010010000000301213616500010013266 0ustar /* * Copyright (C) 2019. 
Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the rsa interface for KAE engine utils dealing * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_RSA_UTILS_H #define HPRE_RSA_UTILS_H BN_ULONG *bn_get_words(const BIGNUM *a); void hpre_free_bn_ctx_buf(BN_CTX *bn_ctx, unsigned char *in_buf, int num); int hpre_rsa_check_para(int flen, const unsigned char *from, unsigned char *to, RSA *rsa); int hpre_get_prienc_res(int padding, BIGNUM *f, const BIGNUM *n, BIGNUM *bn_ret, BIGNUM **res); int check_bit_useful(const int bit); int check_pubkey_param(const BIGNUM *n, const BIGNUM *e); int hpre_rsa_padding(int flen, const unsigned char *from, unsigned char *buf, int num, int padding, int type); int check_rsa_padding(unsigned char *to, int num, const unsigned char *buf, int len, int padding, int type); int hpre_rsa_primegen(int bits, BIGNUM *e_value, BIGNUM *p, BIGNUM *q, BN_GENCB *cb); #endif KAE/alg/pkey/hpre_rsa_soft.c0000644060212406010010000000404213616500010013100 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for switch to soft rsa * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <openssl/rsa.h> #include "hpre_rsa.h" #include "engine_log.h" /** * succ: > 0 * fail: 0 */ int hpre_rsa_soft_calc(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding, int type) { US_DEBUG("hpre_rsa_soft_calc.\n"); int ret = 0; const RSA_METHOD *soft_rsa = RSA_PKCS1_OpenSSL(); switch (type) { case PUB_ENC: ret = RSA_meth_get_pub_enc(soft_rsa)(flen, from, to, rsa, padding); break; case PUB_DEC: ret = RSA_meth_get_pub_dec(soft_rsa)(flen, from, to, rsa, padding); break; case PRI_ENC: ret = RSA_meth_get_priv_enc(soft_rsa)(flen, from, to, rsa, padding); break; case PRI_DEC: ret = RSA_meth_get_priv_dec(soft_rsa)(flen, from, to, rsa, padding); break; default: return 0; } return ret; } /** * succ: 1 * fail: 0 */ int hpre_rsa_soft_genkey(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb) { US_DEBUG("hpre_rsa_soft_genkey.\n"); UNUSED(cb); const RSA_METHOD *default_meth = RSA_PKCS1_OpenSSL(); RSA_set_method(rsa, default_meth); int ret = RSA_generate_key_ex(rsa, bits, e, (BN_GENCB *)NULL); if (ret != 1) { US_ERR("rsa soft key generate fail!"); return 0; } return 1; } KAE/alg/pkey/hpre_rsa_soft.h0000644060212406010010000000175313616500010013113 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
* * Description: This file provides the rsa interface for soft rsa * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_RSA_SOFT_H #define HPRE_RSA_SOFT_H #include <openssl/rsa.h> int hpre_rsa_soft_calc(int flen, const unsigned char *from, unsigned char *to, RSA *rsa, int padding, int type); int hpre_rsa_soft_genkey(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); #endif KAE/alg/pkey/hpre_rsa_utils.c0000644060212406010010000003410513616500010013270 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine utils dealing with wrapdrive * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <openssl/rsa.h> #include <openssl/err.h> #include <openssl/evp.h> #include "engine_types.h" #include "engine_log.h" #include "hpre_rsa.h" #include "hpre_wd.h" #include "wd_rsa.h" BN_ULONG *bn_get_words(const BIGNUM *a) { return a->d; } void hpre_free_bn_ctx_buf(BN_CTX *bn_ctx, unsigned char *in_buf, int num) { if (bn_ctx != NULL) { BN_CTX_end(bn_ctx); } BN_CTX_free(bn_ctx); if (in_buf != NULL) { OPENSSL_clear_free(in_buf, num); } } /* check parameter */ int hpre_rsa_check_para(int flen, const unsigned char *from, unsigned char *to, RSA *rsa) { if ((rsa == NULL || from == NULL || to == NULL || flen <= 0)) { US_ERR("RSA key %p, input %p or output %p are NULL, \ or flen invalid length.\n", rsa, from, to); return HPRE_CRYPTO_FAIL; } return HPRE_CRYPTO_SUCC; } int hpre_get_prienc_res(int padding, BIGNUM *f, const BIGNUM *n, BIGNUM *bn_ret, BIGNUM **res) { if (padding == RSA_X931_PADDING) { if (!BN_sub(f, n, bn_ret)) { return HPRE_CRYPTO_FAIL; } if (BN_cmp(bn_ret, f) > 0) { *res = f; } else { *res = bn_ret; } } else { *res = bn_ret; } return HPRE_CRYPTO_SUCC; } /** * func: * desc: * Check HPRE rsa bits * * @param bit :rsa bit * @return * succ: 1 * fail: 0 */ int check_bit_useful(const int bit) { switch (bit) { case RSA1024BITS: case RSA2048BITS: case RSA3072BITS: case RSA4096BITS: return 1; default: break; } return 0; } /** * * @param n * @param e * @return success 1 / failed 0 */ int check_pubkey_param(const BIGNUM *n, const BIGNUM *e) { if (BN_num_bits(n) > OPENSSL_RSA_MAX_MODULUS_BITS) { KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_MODULE_TOO_LARGE); US_ERR("RSA MODULUS TOO LARGE!"); return HPRE_CRYPTO_FAIL; } if (BN_ucmp(n, e) <= 0) { KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_INVAILED_E_VALUE); US_ERR("RSA E VALUE IS NOT VALID!"); return HPRE_CRYPTO_FAIL; } /* for large moduli, enforce exponent limit */ if (BN_num_bits(n) > 
OPENSSL_RSA_SMALL_MODULUS_BITS) { if (BN_num_bits(e) > OPENSSL_RSA_MAX_PUBEXP_BITS) { KAEerr(KAE_F_CHECK_PUBKEY_PARAM, KAE_R_INVAILED_E_VALUE); US_ERR("RSA E VALUE IS NOT VALID!"); return HPRE_CRYPTO_FAIL; } } return HPRE_CRYPTO_SUCC; } static int hpre_pubenc_padding(int flen, const unsigned char *from, unsigned char *buf, int num, int padding) { int ret = HPRE_CRYPTO_FAIL; switch (padding) { case RSA_PKCS1_PADDING: ret = RSA_padding_add_PKCS1_type_2(buf, num, from, flen); break; case RSA_PKCS1_OAEP_PADDING: ret = RSA_padding_add_PKCS1_OAEP(buf, num, from, flen, NULL, 0); break; case RSA_SSLV23_PADDING: ret = RSA_padding_add_SSLv23(buf, num, from, flen); break; case RSA_NO_PADDING: ret = RSA_padding_add_none(buf, num, from, flen); break; default: KAEerr(KAE_F_HPRE_PUBENC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); US_ERR("RSA UNKNOWN PADDING TYPE!"); ret = HPRE_CRYPTO_FAIL; } if (ret <= 0) { US_ERR("padding error: ret = %d", ret); ret = HPRE_CRYPTO_FAIL; } else { ret = HPRE_CRYPTO_SUCC; } return ret; } static int hpre_prienc_padding(int flen, const unsigned char *from, unsigned char *buf, int num, int padding) { int ret = HPRE_CRYPTO_FAIL; switch (padding) { case RSA_PKCS1_PADDING: ret = RSA_padding_add_PKCS1_type_1(buf, num, from, flen); break; case RSA_X931_PADDING: ret = RSA_padding_add_X931(buf, num, from, flen); break; case RSA_NO_PADDING: ret = RSA_padding_add_none(buf, num, from, flen); break; default: KAEerr(KAE_F_HPRE_PRIENC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); US_ERR("RSA UNKNOWN PADDING TYPE!"); ret = HPRE_CRYPTO_FAIL; } if (ret <= 0) { US_DEBUG("padding error: ret = %d", ret); ret = HPRE_CRYPTO_FAIL; } else { ret = HPRE_CRYPTO_SUCC; } return ret; } /** * func: * * @param flen [IN] - size in bytes of input * @param from [IN] - pointer to the input * @param buf [OUT] - pointer to output data * @param num [IN] - pointer to public key structure * @param padding [IN] - Padding scheme * @param type [IN] - Padding type * @return * SUCCESS: 1 * FAIL: 0 * desc: * rsa encrypt padding. 
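 *       example (mirrors the call in hpre_rsa_public_encrypt above):
 *           ret = hpre_rsa_padding(flen, from, in_buf, num_bytes, padding, PUB_ENC);
 *           if (ret == HPRE_CRYPTO_FAIL) { /@ switch to the soft path @/ }
 *       (the "/@ ... @/" marker stands in for a nested comment)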
* */ int hpre_rsa_padding(int flen, const unsigned char *from, unsigned char *buf, int num, int padding, int type) { int ret = HPRE_CRYPTO_FAIL; if (type == PUB_ENC) { return hpre_pubenc_padding(flen, from, buf, num, padding); } else if (type == PRI_ENC) { return hpre_prienc_padding(flen, from, buf, num, padding); } US_ERR("hpre rsa padding type error."); return ret; } static int hpre_check_pubdec_padding(unsigned char *to, int num, const unsigned char *buf, int len, int padding) { int ret = HPRE_CRYPTO_FAIL; switch (padding) { case RSA_PKCS1_PADDING: ret = RSA_padding_check_PKCS1_type_1(to, num, buf, len, num); break; case RSA_X931_PADDING: ret = RSA_padding_check_X931(to, num, buf, len, num); break; case RSA_NO_PADDING: kae_memcpy(to, buf, len); ret = len; break; default: KAEerr(KAE_F_CHECK_HPRE_PUBDEC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); US_ERR("RSA UNKNOWN PADDING TYPE!"); ret = HPRE_CRYPTO_FAIL; } if (ret == -1) { US_ERR("FAIL ret = %d.", ret); ret = HPRE_CRYPTO_FAIL; } return ret; } static int hpre_check_pridec_padding(unsigned char *to, int num, const unsigned char *buf, int len, int padding) { int ret = HPRE_CRYPTO_FAIL; switch (padding) { case RSA_PKCS1_PADDING: ret = RSA_padding_check_PKCS1_type_2(to, num, buf, len, num); break; case RSA_PKCS1_OAEP_PADDING: ret = RSA_padding_check_PKCS1_OAEP(to, num, buf, len, num, NULL, 0); break; case RSA_SSLV23_PADDING: ret = RSA_padding_check_SSLv23(to, num, buf, len, num); break; case RSA_NO_PADDING: kae_memcpy(to, buf, len); ret = len; break; default: KAEerr(KAE_F_CHECK_HPRE_PRIDEC_PADDING, KAE_R_UNKNOW_PADDING_TYPE); US_ERR("RSA UNKNOWN PADDING TYPE!"); ret = HPRE_CRYPTO_FAIL; } if (ret == -1) { US_ERR("FAIL ret = %d.", ret); ret = HPRE_CRYPTO_FAIL; } return ret; } /** * func: * * @param len [IN] - size in bytes of output * @param to [IN] - pointer to the output * @param buf [OUT] - pointer to output data * @param num [IN] - pointer to public key structure * @param padding [IN] - Padding scheme * @param type [IN] - Padding type * @return * SUCCESS: 1 * FAIL: 0 * desc: * rsa decrypt padding. 
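 *       example (mirrors the call in hpre_rsa_public_decrypt above):
 *           ret = check_rsa_padding(to, num_bytes, buf, len, padding, PUB_DEC);
 *       ret is HPRE_CRYPTO_FAIL (0) on error; otherwise it is the result of
 *       the underlying RSA_padding_check_* call, i.e. the recovered data
 *       length.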
* */ int check_rsa_padding(unsigned char *to, int num, const unsigned char *buf, int len, int padding, int type) { int ret = HPRE_CRYPTO_FAIL; if (type == PUB_DEC) { return hpre_check_pubdec_padding(to, num, buf, len, padding); } else if (type == PRI_DEC) { return hpre_check_pridec_padding(to, num, buf, len, padding); } US_ERR("hpre rsa padding type error."); return ret; } static int check_primeequal(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *prime) { int j; for (j = 0; j < i; j++) { BIGNUM *prev_prime = NULL; if (j == 0) { prev_prime = rsa_p; } else { prev_prime = rsa_q; } if (!BN_cmp(prime, prev_prime)) { return KAE_FAIL; } } return KAE_SUCCESS; } static int prime_mul_res(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *r1, BN_CTX *ctx, BN_GENCB *cb) { if (i == 1) { /* we get at least 2 primes */ if (!BN_mul(r1, rsa_p, rsa_q, ctx)) { goto err; } } else { /* i == 0, do nothing */ if (!BN_GENCB_call(cb, 3, i)) { // When a random p has been found, call BN_GENCB_call(cb, 3, *i) goto err; } goto cont; } return KAE_SUCCESS; err: return -1; cont: return 1; } static int check_prime_sufficient(int *i, int *bitsr, int *bitse, int *n, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *r1, BIGNUM *r2, BN_CTX *ctx, BN_GENCB *cb) { BN_ULONG bitst; static int retries = 0; /* calculate n immediately to see if it's sufficient */ int ret = prime_mul_res(*i, rsa_p, rsa_q, r1, ctx, cb); if (ret != KAE_SUCCESS) { return ret; } if (!BN_rshift(r2, r1, *bitse - 4)) { // right shift *bitse - 4 goto err; } bitst = BN_get_word(r2); if (bitst < 0x9 || bitst > 0xF) { *bitse -= bitsr[*i]; if (!BN_GENCB_call(cb, 2, *n++)) { // When the n-th is rejected, call BN_GENCB_call(cb, 2, n) goto err; } if (retries == 4) { // retries max is 4 *i = -1; *bitse = 0; retries = 0; goto cont; } retries++; goto redo; } if (!BN_GENCB_call(cb, 3, *i)) { // When a random p has been found, call BN_GENCB_call(cb, 3, *i) goto err; } retries = 0; return 0; err: return -1; redo: return -2; // if redo return -2 cont: return 1; } static void set_primes(int i, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM **prime) { if (i == 0) { *prime = rsa_p; } else { *prime = rsa_q; } BN_set_flags(*prime, BN_FLG_CONSTTIME); } static int check_prime_useful(int *n, BIGNUM *prime, BIGNUM *r1, BIGNUM *r2, BIGNUM *e_value, BN_CTX *ctx, BN_GENCB *cb) { if (!BN_sub(r2, prime, BN_value_one())) { goto err; } ERR_set_mark(); BN_set_flags(r2, BN_FLG_CONSTTIME); if (BN_mod_inverse(r1, r2, e_value, ctx) != NULL) { goto br; } unsigned long error = ERR_peek_last_error(); if (ERR_GET_LIB(error) == ERR_LIB_BN && ERR_GET_REASON(error) == BN_R_NO_INVERSE) { ERR_pop_to_mark(); } else { goto err; } if (!BN_GENCB_call(cb, 2, *n++)) { // When the n-th is rejected, call BN_GENCB_call(cb, 2, n) goto err; } return 0; err: return -1; br: return 1; } static void switch_p_q(BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *p, BIGNUM *q) { BIGNUM *tmp = (BIGNUM *)NULL; if (BN_cmp(rsa_p, rsa_q) < 0) { tmp = rsa_p; rsa_p = rsa_q; rsa_q = tmp; } BN_copy(q, rsa_q); BN_copy(p, rsa_p); } static int hpre_get_prime_once(int i, const int *bitsr, int *n, BIGNUM *prime, BIGNUM *rsa_p, BIGNUM *rsa_q, BIGNUM *r1, BIGNUM *r2, BIGNUM *e_value, BN_CTX *ctx, BN_GENCB *cb) { int adj = 0; int ret = KAE_FAIL; for (;;) { redo: if (!BN_generate_prime_ex(prime, bitsr[i] + adj, 0, (const BIGNUM *)NULL, (const BIGNUM *)NULL, cb)) { goto err; } /* * prime should not be equal to p, q, r_3... 
* (those primes prior to this one) */ if (check_primeequal(i, rsa_p, rsa_q, prime) == KAE_FAIL) { goto redo; } ret = check_prime_useful(n, prime, r1, r2, e_value, ctx, cb); if (ret == KAE_FAIL) { goto err; } else if (ret == 1) { break; } } return ret; err: return KAE_FAIL; } int hpre_rsa_primegen(int bits, BIGNUM *e_value, BIGNUM *p, BIGNUM *q, BN_GENCB *cb) { int ok = -1; int primes = 2; int n = 0; int bitse = 0; int i = 0; int bitsr[2]; // 2 bits BN_CTX *ctx = (BN_CTX *)NULL; BIGNUM *r1 = (BIGNUM *)NULL; BIGNUM *r2 = (BIGNUM *)NULL; BIGNUM *prime = (BIGNUM *)NULL; BIGNUM *rsa_p, *rsa_q; ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } BN_CTX_start(ctx); r1 = BN_CTX_get(ctx); r2 = BN_CTX_get(ctx); rsa_p = BN_CTX_get(ctx); rsa_q = BN_CTX_get(ctx); if (rsa_q == NULL) { goto err; } /* divide bits into 'primes' pieces evenly */ int quo = bits / primes; bitsr[0] = quo; bitsr[1] = quo; /* generate p, q and other primes (if any) */ for (i = 0; i < primes; i++) { set_primes(i, rsa_p, rsa_q, &prime); redo: if (hpre_get_prime_once(i, bitsr, &n, prime, rsa_p, rsa_q, r1, r2, e_value, ctx, cb) == KAE_FAIL) { goto err; } bitse += bitsr[i]; int ret = check_prime_sufficient(&i, bitsr, &bitse, &n, rsa_p, rsa_q, r1, r2, ctx, cb); if (ret == -1) { goto err; } else if (ret == -2) { // ret = -2 goto redo goto redo; } else if (ret == 1) { continue; } } switch_p_q(rsa_p, rsa_q, p, q); ok = 1; err: if (ok == -1) { KAEerr(KAE_F_HPRE_RSA_PRIMEGEN, KAE_R_ERR_LIB_BN); US_ERR("rsa prime gen failed"); ok = 0; } hpre_free_bn_ctx_buf(ctx, NULL, 0); return ok; } KAE/alg/pkey/hpre_rsa.h0000644060212406010010000000312313616500010012051 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE rsa using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef HPRE_RSA_H #define HPRE_RSA_H #include <semaphore.h> #include <asm/types.h> #include <openssl/rsa.h> #include <openssl/err.h> #include <openssl/evp.h> #include <openssl/bn.h> #include <openssl/engine.h> #include "../utils/engine_utils.h" #include "../utils/engine_opensslerr.h" #define RSA_MIN_MODULUS_BITS 512 #define RSA1024BITS 1024 #define RSA2048BITS 2048 #define RSA3072BITS 3072 #define RSA4096BITS 4096 #define HPRE_CONT (-1) #define HPRE_CRYPTO_SUCC 1 #define HPRE_CRYPTO_FAIL 0 #define HPRE_CRYPTO_SOFT (-1) enum { INVAID = 0, PUB_ENC, PUB_DEC, PRI_ENC, PRI_DEC, MAX_CODE, }; struct bignum_st { BN_ULONG *d; int top; int dmax; int neg; int flags; }; RSA_METHOD *hpre_get_rsa_methods(void); int hpre_module_init(); void hpre_destroy(); EVP_PKEY_METHOD *get_rsa_pkey_meth(void); #endif KAE/alg/ciphers/0000755060212406010010000000000013616500010010563 5ustar KAE/alg/ciphers/sec_ciphers_soft.h0000644060212406010010000000362213616500010014261 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. 
* * Description: This file provides the cipher interface for soft ciphers * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /***************************************************************************** * @file sec_ciphers_soft.h * * This file provides the cipher interface for soft ciphers * *****************************************************************************/ #ifndef SEC_CIPHERS_SOFT_H #define SEC_CIPHERS_SOFT_H #include "sec_ciphers.h" #include "engine_kae.h" typedef struct cipher_threshold_table_s { int nid; int threshold; } cipher_threshold_table_t; typedef struct sw_cipher_s { int nid; const EVP_CIPHER *(*get_cipher)(void); } sw_cipher_t; const EVP_CIPHER *sec_ciphers_get_cipher_sw_impl(int nid); int sec_ciphers_sw_get_threshold(int nid); int sec_ciphers_sw_impl_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); int sec_ciphers_sw_impl_cleanup(EVP_CIPHER_CTX *ctx); int sec_ciphers_software_encrypt(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t* priv_ctx); int sec_ciphers_sw_hw_ctx_sync(EVP_CIPHER_CTX *ctx, sec_cipher_priv_ctx_syncto_t direction); int sec_ciphers_ecb_encryt(xts_ecb_data* ecb_encryto, uint8_t* buf_out, uint8_t* buf_in, int buf_len); int sec_ciphers_ecb_decrypt(xts_ecb_data* ecb_encryto, uint8_t* buf_out, uint8_t* buf_in, int buf_len); #endif KAE/alg/ciphers/sec_ciphers_utils.h0000644060212406010010000000322313616500010014443 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the cipher interface for KAE engine utils dealing * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /***************************************************************************** * @file sec_ciphers_utils.h * * This file provides the implemenation for SEC engine utils dealing * *****************************************************************************/ #ifndef SEC_CIPHERS_CHECKER_H #define SEC_CIPHERS_CHECKER_H #include "sec_ciphers.h" #include "engine_kae.h" #define IV_SIZE 16 enum CIPHERS_MODE { ECB, CBC, CTR, XTS, }; enum CIPHERS_ALG { SM4, AES, DES, M_3DES, }; int sec_ciphers_is_iv_may_overflow(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx); int sec_ciphers_get_cipher_mode(int nid); int sec_ciphers_get_cipher_alg(int nid); void sec_ciphers_ctr_iv_inc(uint8_t *counter, uint32_t c); void sec_ciphers_ctr_iv_sub(uint8_t *counter); void sec_ciphers_xts_iv_inc(cipher_priv_ctx_t* priv_ctx); void sec_ciphers_update_iv(cipher_priv_ctx_t *tmp_docipher_ctx, int cipher_length); #endif KAE/alg/ciphers/sec_ciphers.h0000644060212406010010000000707113616500010013230 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the the interface for KAE engine dealing with wrapdrive * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /***************************************************************************** * @file sec_cipher.h * * This file provides the interface for SEC engine dealing with wrapdrive * *****************************************************************************/ #ifndef SEC_CIPHERS_H #define SEC_CIPHERS_H #include <openssl/engine.h> #include "wd_cipher.h" #include "wd_queue_memory.h" #define MAX_SEND_TRY_CNTS 50 enum openssl_cipher_enc_t { OPENSSL_DECRYPTION = 0, OPENSSL_ENCRYPTION = 1 }; enum sec_cipher_priv_ctx_syncto { SEC_CIHPER_SYNC_S2W = 1, // software priv ctx sync to hareware priv ctx SEC_CIHPER_SYNC_H2S, // hareware priv ctx sync to software priv ctx }; typedef enum sec_cipher_priv_ctx_syncto sec_cipher_priv_ctx_syncto_t; typedef struct xts_ecb_data_strcut { EVP_CIPHER_CTX *ecb_ctx; const EVP_CIPHER* cipher_type; uint8_t* key2; uint8_t key2_len; uint8_t* iv_out; uint8_t* encryto_iv; uint32_t countNum; } xts_ecb_data; typedef struct cipher_engine_ctx cipher_engine_ctx_t; /* * | 16bytes * n length | offset | | * | <---------first buf -----------><---next buf -->| * the next buf send to warpdriv should start at hardaddr + first offset */ struct cipher_priv_ctx { int32_t encrypt; // encrypt or decryto DECRYPTION = 0, ENCRYPTION = 1 uint32_t inl; // input length uint32_t left_len; // left length for warpdrive to do uint32_t offset; // prev buf offset, that indicate the next buf should start at hardware_addr+offset uint8_t* key; // key uint32_t key_len; // key length uint8_t* iv; // iv uint32_t iv_len; // iv length uint8_t* next_iv; // store IV for next cbc operation in decryption const uint8_t* in; uint8_t* out; uint32_t c_mode; uint32_t c_alg; uint32_t do_cipher_len; // do one cycle cipher length size_t switch_threshold; // crypt small packet offload threshold void* sw_ctx_data; // Pointer for context data that will be used by 
Small packet offload feature. xts_ecb_data* ecb_encryto; cipher_engine_ctx_t* e_cipher_ctx; }; typedef struct cipher_priv_ctx cipher_priv_ctx_t; struct cipher_engine_ctx { KAE_QUEUE_DATA_NODE_S* q_node; struct wcrypto_cipher_op_data op_data; struct wcrypto_cipher_ctx_setup setup; void* wd_ctx; // one ctx or a list of ctx cipher_priv_ctx_t* priv_ctx; }; int sec_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid); void sec_ciphers_free_ciphers(void); int sec_cipher_engine_ctx_poll(void* engnine_ctx); int cipher_module_init(void); void sec_ciphers_cb(const void* msg, void* tag); #endif KAE/alg/ciphers/sec_ciphers_utils.c0000644060212406010010000001367213616500010014447 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine utils dealing with wrapdrive * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /***************************************************************************** * @file sec_ciphers_utils.c * * This file provides the interface for SEC engine dealing with wrapdrive * *****************************************************************************/ #include "sec_ciphers_utils.h" #include "engine_types.h" #include "sec_ciphers_soft.h" int sec_ciphers_get_cipher_mode(int nid) { uint32_t c_mode = NO_C_MODE; switch (nid) { case NID_aes_128_ecb: case NID_aes_192_ecb: case NID_aes_256_ecb: c_mode = ECB; break; case NID_aes_128_cbc: case NID_aes_192_cbc: case NID_aes_256_cbc: case NID_sm4_cbc: c_mode = CBC; break; case NID_aes_128_ctr: case NID_aes_192_ctr: case NID_aes_256_ctr: case NID_sm4_ctr: c_mode = CTR; break; case NID_aes_128_xts: case NID_aes_256_xts: c_mode = XTS; break; default: US_WARN("nid=%d don't support by sec engine.", nid); break; } return c_mode; } int sec_ciphers_get_cipher_alg(int nid) { uint32_t c_alg = NO_C_ALG; switch (nid) { case NID_sm4_ctr: case NID_sm4_cbc: c_alg = SM4; break; case NID_aes_128_ecb: case NID_aes_192_ecb: case NID_aes_256_ecb: case NID_aes_128_cbc: case NID_aes_192_cbc: case NID_aes_256_cbc: case NID_aes_128_ctr: case NID_aes_192_ctr: case NID_aes_256_ctr: case NID_aes_128_xts: case NID_aes_256_xts: c_alg = AES; break; default: US_WARN("nid=%d don't support by sec engine.", nid); break; } return c_alg; } /* * SEC ENGINE IV: {Flag, Random, Counter} * | <--4--> <--8--> | <---4bytes ---> | * | Flag, Random | counter | */ static unsigned int __iv_to_engine_counter(const uint8_t *iv) { unsigned int counter = 0; const unsigned int SEC_IV_COUNTER_POSTION = 12; counter |= iv[SEC_IV_COUNTER_POSTION]; counter <<= 8; // left shift 8 counter |= iv[(unsigned int)(SEC_IV_COUNTER_POSTION + 1)]; // count num 1 counter <<= 8; // left shift 8 counter |= iv[(unsigned int)(SEC_IV_COUNTER_POSTION + 2)]; // count num 2 counter <<= 8; // left shift 8 counter |= iv[(unsigned int)(SEC_IV_COUNTER_POSTION + 3)]; // count num 3 return counter; } /* increment counter (128-bit int) by c */ void sec_ciphers_ctr_iv_inc(uint8_t *counter, uint32_t c) { uint32_t n = 16; do { 
--n; c += counter[n]; counter[n] = (uint8_t)c; c >>= 8; // right shift 8 } while (n); } void sec_ciphers_xts_iv_inc(cipher_priv_ctx_t* priv_ctx) { uint32_t i = 0; unsigned int carry; unsigned int res; union { uint64_t u[2]; // union length 2 uint32_t d[4]; // union length 4 uint8_t c[16]; // union length 16 }tweak; kae_memcpy(tweak.c, priv_ctx->ecb_encryto->encryto_iv, 16); // encrypto iv length 16 for (i = 0; i < priv_ctx->ecb_encryto->countNum; i++) { // cppcheck-suppress * res = 0x87 & (((int)tweak.d[3]) >> 31); // algorithm para 31 carry = (unsigned int)(tweak.u[0] >> 63); // algorithm para 63 tweak.u[0] = (tweak.u[0] << 1) ^ res; tweak.u[1] = (tweak.u[1] << 1) | carry; } sec_ciphers_ecb_decrypt(priv_ctx->ecb_encryto, priv_ctx->ecb_encryto->iv_out, tweak.c, 16); // iv len 16 kae_memcpy(priv_ctx->iv, priv_ctx->ecb_encryto->iv_out, 16); // update iv len 16 } void sec_ciphers_ctr_iv_sub(uint8_t *counter) { unsigned int n = 16; int c = 0; do { --n; c = counter[n] < 1 ? 1 : 0; counter[n] = (unsigned char)(counter[n] + c * 256 - 1); // algorithm para 256 if (c == 0) { break; } } while (n); } void sec_ciphers_update_iv(cipher_priv_ctx_t *tmp_docipher_ctx, int cipher_length) { unsigned int inc_counter = 0; switch (tmp_docipher_ctx->c_mode) { case CBC: if (tmp_docipher_ctx->encrypt == OPENSSL_ENCRYPTION) { kae_memcpy(tmp_docipher_ctx->iv, tmp_docipher_ctx->out + cipher_length - IV_SIZE, IV_SIZE); } break; case CTR: inc_counter = cipher_length >> 4; // right shift 4 sec_ciphers_ctr_iv_inc(tmp_docipher_ctx->iv, inc_counter); break; case XTS: // update iv here break; default: break; } return; } int sec_ciphers_is_iv_may_overflow(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) { unsigned int will_inc_counter = 0; unsigned int current_counter = 0; if (sec_ciphers_get_cipher_mode(EVP_CIPHER_CTX_nid(ctx)) == CTR) { // (input length + prev offset)/ 16 = will_inc_counter will_inc_counter = (priv_ctx->inl + priv_ctx->offset) >> 4; // right shift 4 current_counter = __iv_to_engine_counter(priv_ctx->iv); if ((0xFFFFFFFFU - current_counter < will_inc_counter)) { US_DEBUG("ciphers increase iv overflow 0xFFFFFFFF."); return 1; } } return 0; } KAE/alg/ciphers/sec_ciphers_wd.c0000644060212406010010000002311513616500010013712 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE ciphers using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /***************************************************************************** * @file sec_cipher_wd.c * * This file provides the implemenation for SEC ciphers using wd interface * *****************************************************************************/ #include "sec_ciphers_wd.h" #include "sec_ciphers_utils.h" #include "wd_queue_memory.h" #include "engine_utils.h" #include "engine_types.h" #define OUTPUT_CACHE_SIZE (256*1024) #define INPUT_CACHE_SIZE (256*1024) #define MAX_KEY_SIZE 64 #define MAX_IV_SIZE 16 static KAE_QUEUE_POOL_HEAD_S* g_sec_ciphers_qnode_pool = NULL; static cipher_engine_ctx_t* wd_ciphers_new_engine_ctx(KAE_QUEUE_DATA_NODE_S* q_node, cipher_priv_ctx_t* priv_ctx); void wd_ciphers_free_engine_ctx(void* engine_ctx) { cipher_engine_ctx_t* e_cipher_ctx = (cipher_engine_ctx_t *)engine_ctx; if (e_cipher_ctx == NULL) { return; } if (e_cipher_ctx->op_data.in && e_cipher_ctx->setup.br.usr) { e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.in); e_cipher_ctx->op_data.in = NULL; } if (e_cipher_ctx->op_data.out && e_cipher_ctx->setup.br.usr) { e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.out); e_cipher_ctx->op_data.out = NULL; } if (e_cipher_ctx->op_data.iv && e_cipher_ctx->setup.br.usr) { e_cipher_ctx->setup.br.free(e_cipher_ctx->setup.br.usr, (void *)e_cipher_ctx->op_data.iv); e_cipher_ctx->op_data.iv = NULL; } OPENSSL_free(e_cipher_ctx); e_cipher_ctx = NULL; } static cipher_engine_ctx_t* wd_ciphers_new_engine_ctx(KAE_QUEUE_DATA_NODE_S* q_node, cipher_priv_ctx_t* priv_ctx) { cipher_engine_ctx_t *e_cipher_ctx = NULL; e_cipher_ctx = (cipher_engine_ctx_t *)OPENSSL_malloc(sizeof(cipher_engine_ctx_t)); if (e_cipher_ctx == NULL) { US_ERR("OPENSSL_malloc ctx failed"); return NULL; } kae_memset(e_cipher_ctx, 0, sizeof(cipher_engine_ctx_t)); e_cipher_ctx->setup.br.alloc = kae_wd_alloc_blk; e_cipher_ctx->setup.br.free = kae_wd_free_blk; e_cipher_ctx->setup.br.iova_map = kae_dma_map; e_cipher_ctx->setup.br.iova_unmap = kae_dma_unmap; e_cipher_ctx->setup.br.usr = q_node->kae_queue_mem_pool; e_cipher_ctx->op_data.in = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, INPUT_CACHE_SIZE); if (e_cipher_ctx->op_data.in == NULL) { US_ERR("alloc opdata in buf failed"); goto err; } e_cipher_ctx->op_data.out = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, OUTPUT_CACHE_SIZE); if (e_cipher_ctx->op_data.out == NULL) { US_ERR("alloc opdata out buf failed"); goto err; } e_cipher_ctx->op_data.iv = e_cipher_ctx->setup.br.alloc(e_cipher_ctx->setup.br.usr, priv_ctx->iv_len); if (e_cipher_ctx->op_data.iv == NULL) { US_ERR("alloc opdata iv buf failed"); goto err; } e_cipher_ctx->priv_ctx = priv_ctx; // point to each other e_cipher_ctx->q_node = q_node; // point to each other q_node->engine_ctx = e_cipher_ctx; // point to each other return e_cipher_ctx; err: (void)wd_ciphers_free_engine_ctx(e_cipher_ctx); return NULL; } static int wd_ciphers_init_engine_ctx(cipher_engine_ctx_t *e_cipher_ctx) { struct wd_queue *q = e_cipher_ctx->q_node->kae_wd_queue; cipher_priv_ctx_t* priv_ctx = e_cipher_ctx->priv_ctx; if (e_cipher_ctx->wd_ctx != NULL) { US_WARN("wd ctx is in used by other ciphers"); return KAE_FAIL; } e_cipher_ctx->setup.alg = (enum wcrypto_cipher_alg)priv_ctx->c_alg; // for example: WD_CIPHER_SM4; e_cipher_ctx->setup.mode = (enum wcrypto_cipher_mode)priv_ctx->c_mode; // for example: WD_CIPHER_CBC; e_cipher_ctx->setup.cb = (wcrypto_cb)sec_ciphers_cb; e_cipher_ctx->wd_ctx = wcrypto_create_cipher_ctx(q, 
&e_cipher_ctx->setup); if (e_cipher_ctx->wd_ctx == NULL) { US_ERR("wd create sec cipher ctx fail!"); return KAE_FAIL; } wcrypto_set_cipher_key(e_cipher_ctx->wd_ctx, priv_ctx->key, priv_ctx->key_len); return KAE_SUCCESS; } cipher_engine_ctx_t* wd_ciphers_get_engine_ctx(cipher_priv_ctx_t* priv_ctx) { KAE_QUEUE_DATA_NODE_S *q_node = NULL; cipher_engine_ctx_t *e_cipher_ctx = NULL; if (unlikely(priv_ctx == NULL)) { US_ERR("sec cipher priv ctx NULL!"); return NULL; } q_node = kae_get_node_from_pool(g_sec_ciphers_qnode_pool); if (q_node == NULL) { US_ERR_LIMIT("failed to get hardware queue"); return NULL; } e_cipher_ctx = (cipher_engine_ctx_t *)q_node->engine_ctx; if (e_cipher_ctx == NULL) { e_cipher_ctx = wd_ciphers_new_engine_ctx(q_node, priv_ctx); if (e_cipher_ctx == NULL) { US_WARN("sec new engine ctx fail!"); (void)kae_put_node_to_pool(g_sec_ciphers_qnode_pool, q_node); return NULL; } } e_cipher_ctx->priv_ctx = priv_ctx; if (wd_ciphers_init_engine_ctx(e_cipher_ctx) == KAE_FAIL) { US_WARN("init engine ctx fail!"); wd_ciphers_put_engine_ctx(e_cipher_ctx); return NULL; } return e_cipher_ctx; } void wd_ciphers_put_engine_ctx(cipher_engine_ctx_t* e_cipher_ctx) { if (unlikely(e_cipher_ctx == NULL)) { US_WARN("sec cipher engine ctx NULL!"); return; } if (e_cipher_ctx->wd_ctx != NULL) { wcrypto_del_cipher_ctx(e_cipher_ctx->wd_ctx); e_cipher_ctx->wd_ctx = NULL; } if (e_cipher_ctx->priv_ctx && e_cipher_ctx->priv_ctx->ecb_encryto) { if (e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx != NULL) { EVP_CIPHER_CTX_free(e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx); e_cipher_ctx->priv_ctx->ecb_encryto->ecb_ctx = NULL; } kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->key2); kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->encryto_iv); kae_free(e_cipher_ctx->priv_ctx->ecb_encryto->iv_out); kae_free(e_cipher_ctx->priv_ctx->ecb_encryto); } if (e_cipher_ctx->q_node != NULL) { (void)kae_put_node_to_pool(g_sec_ciphers_qnode_pool, e_cipher_ctx->q_node); } e_cipher_ctx = NULL; return; } int wd_ciphers_do_crypto_impl(cipher_engine_ctx_t *e_cipher_ctx) { int ret = -WD_EINVAL; int trycount = 0; if (unlikely(e_cipher_ctx == NULL)) { US_ERR("do cipher ctx NULL!"); return KAE_FAIL; } again: ret = wcrypto_do_cipher(e_cipher_ctx->wd_ctx, &e_cipher_ctx->op_data, NULL); if (ret != WD_SUCCESS) { if (ret == WD_EBUSY && trycount <= 5) { // try 5 times US_WARN("do cipher busy, retry again!"); trycount++; goto again; } else { US_ERR("do cipher failed!"); return KAE_FAIL; } } return KAE_SUCCESS; } inline void wd_ciphers_set_input_data(cipher_engine_ctx_t *e_cipher_ctx) { // fill engine ctx opdata cipher_priv_ctx_t* priv_ctx = e_cipher_ctx->priv_ctx; kae_memcpy(((uint8_t *)e_cipher_ctx->op_data.in + priv_ctx->offset), priv_ctx->in, priv_ctx->do_cipher_len); if (priv_ctx->encrypt == OPENSSL_ENCRYPTION) { e_cipher_ctx->op_data.op_type = WCRYPTO_CIPHER_ENCRYPTION; } else { e_cipher_ctx->op_data.op_type = WCRYPTO_CIPHER_DECRYPTION; } e_cipher_ctx->op_data.in_bytes = priv_ctx->do_cipher_len + priv_ctx->offset; // the real out data start at opdata.out + offset e_cipher_ctx->op_data.out_bytes = priv_ctx->offset + priv_ctx->do_cipher_len; kae_memcpy(e_cipher_ctx->op_data.iv, priv_ctx->iv, priv_ctx->iv_len); e_cipher_ctx->op_data.iv_bytes = priv_ctx->iv_len; } inline void wd_ciphers_get_output_data(cipher_engine_ctx_t *e_cipher_ctx) { cipher_priv_ctx_t* priv_ctx = e_cipher_ctx->priv_ctx; // the real out data start at opdata.out + offset kae_memcpy(priv_ctx->out, (uint8_t*)e_cipher_ctx->op_data.out + priv_ctx->offset, priv_ctx->do_cipher_len); } 
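A brief aside, not part of the packaged sources: the staging code above copies input to op_data.in plus a carried offset, and the helper defined just below (wd_ciphers_get_do_cipher_len) caps each hardware submission at INPUT_CACHE_SIZE minus that offset. The standalone sketch here only illustrates how a large buffer is consumed over several submissions; the function name chunk_len and the example sizes are illustrative assumptions, not package code.

#include <stdint.h>
#include <stdio.h>

#define INPUT_CACHE_SIZE (256 * 1024)

/* Mirrors the chunking rule used by the engine: at most
 * INPUT_CACHE_SIZE - offset bytes go to hardware per pass. */
static uint32_t chunk_len(uint32_t offset, int leftlen)
{
    int max_input_datalen = INPUT_CACHE_SIZE - offset;
    return (leftlen <= max_input_datalen) ? (uint32_t)leftlen
                                          : (uint32_t)max_input_datalen;
}

int main(void)
{
    uint32_t offset = 8;    /* hypothetical carry-over from a previous call */
    int left = 600 * 1024;  /* 600 KB still to cipher */

    while (left > 0) {
        uint32_t n = chunk_len(offset, left);
        printf("submit %u bytes\n", (unsigned)n);
        left -= (int)n;     /* a real pass also advances in/out and the IV */
    }
    return 0;
}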
inline uint32_t wd_ciphers_get_do_cipher_len(uint32_t offset, int leftlen) { uint32_t do_cipher_len = 0; int max_input_datalen = INPUT_CACHE_SIZE - offset; /* * Note: Small encrypted block can be encrypted once. * or the last encrypted slice of a large encrypted block */ if (leftlen <= max_input_datalen) { do_cipher_len = leftlen; } else { do_cipher_len = max_input_datalen; } return do_cipher_len; } KAE_QUEUE_POOL_HEAD_S* wd_ciphers_get_qnode_pool(void) { return g_sec_ciphers_qnode_pool; } int wd_ciphers_init_qnode_pool(void) { kae_queue_pool_destroy(g_sec_ciphers_qnode_pool, wd_ciphers_free_engine_ctx); g_sec_ciphers_qnode_pool = kae_init_queue_pool(WCRYPTO_CIPHER); if (g_sec_ciphers_qnode_pool == NULL) { US_ERR("do cipher ctx NULL!"); return KAE_FAIL; } return KAE_SUCCESS; } KAE/alg/ciphers/sec_ciphers_wd.h0000644060212406010010000000320413616500010013714 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the cipher interface for KAE ciphers using wd interface * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /***************************************************************************** * @file sec_cipher_wd.h * * This file provides the interface for SEC ciphers using wd interface * *****************************************************************************/ #ifndef SEC_CIPHERS_WD_H #define SEC_CIPHERS_WD_H #include "sec_ciphers.h" cipher_engine_ctx_t* wd_ciphers_get_engine_ctx(cipher_priv_ctx_t* priv_ctx); void wd_ciphers_put_engine_ctx(cipher_engine_ctx_t* e_cipher_ctx); int wd_ciphers_do_crypto_impl(cipher_engine_ctx_t *e_cipher_ctx); inline void wd_ciphers_set_input_data(cipher_engine_ctx_t *e_cipher_ctx); inline void wd_ciphers_get_output_data(cipher_engine_ctx_t *e_cipher_ctx); inline uint32_t wd_ciphers_get_do_cipher_len(uint32_t offset, int leftlen); int wd_ciphers_init_qnode_pool(void); KAE_QUEUE_POOL_HEAD_S* wd_ciphers_get_qnode_pool(void); void wd_ciphers_free_engine_ctx(void* engine_ctx); #endif KAE/alg/ciphers/sec_ciphers_soft.c0000644060212406010010000002550313616500010014256 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for switch to soft ciphers * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /***************************************************************************** * @file sec_ciphers_soft.c * * This file provides the implemenation for switch to soft ciphers * *****************************************************************************/ #include "engine_types.h" #include "sec_ciphers_soft.h" #include "sec_ciphers.h" #include "sec_ciphers_utils.h" #define CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT 192 static cipher_threshold_table_t g_sec_ciphers_pkt_threshold_table[] = { { NID_aes_128_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_192_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_256_ecb, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_128_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_192_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_256_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_128_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_192_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_256_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_128_xts, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_aes_256_xts, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_sm4_cbc, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, { NID_sm4_ctr, CRYPTO_SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT }, }; static int g_sec_ciphers_pkt_threshold_table_size = BLOCKSIZES_OF(g_sec_ciphers_pkt_threshold_table); static sw_cipher_t g_sec_ciphers_sw_cipher_table[] = { { NID_aes_128_ecb, EVP_aes_128_ecb }, { NID_aes_192_ecb, EVP_aes_192_ecb }, { NID_aes_256_ecb, EVP_aes_256_ecb }, { NID_aes_128_cbc, EVP_aes_128_cbc }, { NID_aes_192_cbc, EVP_aes_192_cbc }, { NID_aes_256_cbc, EVP_aes_256_cbc }, { NID_aes_128_ctr, EVP_aes_128_ctr }, { NID_aes_192_ctr, EVP_aes_192_ctr }, { NID_aes_256_ctr, EVP_aes_256_ctr }, { NID_aes_128_xts, EVP_aes_128_xts }, { NID_aes_256_xts, EVP_aes_256_xts }, { NID_sm4_cbc, EVP_sm4_cbc }, { NID_sm4_ctr, EVP_sm4_ctr }, }; static int g_sec_ciphers_sw_cipher_table_size = BLOCKSIZES_OF(g_sec_ciphers_sw_cipher_table); static int sec_ciphers_sw_impl_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl); int sec_ciphers_sw_get_threshold(int nid) { int i = 0; do { if (g_sec_ciphers_pkt_threshold_table[i].nid == nid) { return g_sec_ciphers_pkt_threshold_table[i].threshold; } } while (++i < g_sec_ciphers_pkt_threshold_table_size); US_ERR("nid %d not found in threshold table", nid); return KAE_FAIL; } const EVP_CIPHER *sec_ciphers_get_cipher_sw_impl(int nid) { int i = 0; for (i = 0; i < g_sec_ciphers_sw_cipher_table_size; i++) { if (nid == g_sec_ciphers_sw_cipher_table[i].nid) { return (g_sec_ciphers_sw_cipher_table[i].get_cipher)(); } } US_WARN("Invalid nid %d\n", nid); return (EVP_CIPHER *)NULL; } int sec_ciphers_sw_impl_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc) { int ret = KAE_FAIL; unsigned int sw_size = 0; cipher_priv_ctx_t* priv_ctx = NULL; const EVP_CIPHER *sw_cipher = NULL; /* allowed iv to be empty. */ if (unlikely(key == NULL)) { US_ERR("kae sw init parameter is NULL. key=%p", key); return KAE_FAIL; } if (unlikely(ctx == NULL)) { US_ERR("kae sw init parameter is NULL. 
ctx=%p", ctx); return KAE_FAIL; } priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_ERR("state is NULL"); return KAE_FAIL; } sw_cipher = sec_ciphers_get_cipher_sw_impl(EVP_CIPHER_CTX_nid(ctx)); if (unlikely(sw_cipher == NULL)) { int nid = EVP_CIPHER_CTX_nid(ctx); US_ERR("get openssl software cipher failed. nid = %d", nid); return KAE_FAIL; } sw_size = EVP_CIPHER_impl_ctx_size(sw_cipher); if (unlikely(sw_size == 0)) { US_ERR("get EVP cipher ctx size failed, sw_size=%d", sw_size); return KAE_FAIL; } if (priv_ctx->sw_ctx_data == NULL) { priv_ctx->sw_ctx_data = kae_malloc(sw_size); if (priv_ctx->sw_ctx_data == NULL) { US_ERR("Unable to allocate memory [%u bytes] for sw_ctx_data", sw_size); return KAE_FAIL; } } kae_memset(priv_ctx->sw_ctx_data, 0, sw_size); if (iv == NULL) { iv = EVP_CIPHER_CTX_iv_noconst(ctx); } /* real implementation: Openssl soft arithmetic key initialization function */ EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx->sw_ctx_data); ret = EVP_CIPHER_meth_get_init(sw_cipher)(ctx, key, iv, enc); EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); if (ret != OPENSSL_SUCCESS) { US_ERR("OPENSSL init key failed. ctx=%p", ctx); kae_free(priv_ctx->sw_ctx_data); return KAE_FAIL; } US_DEBUG("kae sw init impl success. ctx=%p", ctx); return KAE_SUCCESS; } int sec_ciphers_sw_impl_cleanup(EVP_CIPHER_CTX *ctx) { cipher_priv_ctx_t* priv_ctx = NULL; if (unlikely(ctx == NULL)) { US_WARN("ctx is NULL"); return KAE_FAIL; } #ifdef KAE_DEBUG_KEY_ENABLE dump_data("iv", EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); #endif priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_WARN("ctx cipher private data is NULL."); return KAE_FAIL; } kae_free(priv_ctx->sw_ctx_data); US_DEBUG("kae sw cleanup impl success, ctx=%p", ctx); return KAE_SUCCESS; } static int sec_ciphers_sw_impl_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) { if (unlikely((ctx == NULL) || (out == NULL) || (in == NULL))) { US_ERR("kae sw cipher parameter is null.ctx=%p, in=%p, out=%p, inl=%d", ctx, out, in, (int)inl); return KAE_FAIL; } cipher_priv_ctx_t* priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_ERR("state is NULL"); return KAE_FAIL; } const EVP_CIPHER* sw_cipher = sec_ciphers_get_cipher_sw_impl(EVP_CIPHER_CTX_nid(ctx)); if (unlikely(sw_cipher == NULL)) { US_ERR("get OpenSSL cipher failed. ctx=%p", ctx); return KAE_FAIL; } EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx->sw_ctx_data); int ret = EVP_CIPHER_meth_get_do_cipher(sw_cipher)(ctx, out, in, inl); if (unlikely(ret == OPENSSL_FAIL)) { EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); US_ERR("OpenSSL do cipher failed. 
ctx=%p", ctx); return KAE_FAIL; } EVP_CIPHER_CTX_set_cipher_data(ctx, priv_ctx); US_DEBUG("kae sw impl do cipher success, ctx=%p", ctx); return KAE_SUCCESS; } int sec_ciphers_software_encrypt(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t* priv_ctx) { int ret = sec_ciphers_sw_impl_do_cipher(ctx, priv_ctx->out, priv_ctx->in, priv_ctx->left_len); if (ret != KAE_SUCCESS) { US_ERR("kae software do cipher or small packet cipher offload failed."); return KAE_FAIL; } // after openssl software do cipher, sync priv data to next priv data for hareware to contiune to do cipher */ ret = sec_ciphers_sw_hw_ctx_sync(ctx, SEC_CIHPER_SYNC_S2W); if (unlikely(ret != KAE_SUCCESS)) { US_ERR("kae sw hw state sync failed."); return KAE_FAIL; } US_DEBUG("Cipher success, ctx=%p", ctx); return KAE_SUCCESS; } int sec_ciphers_sw_hw_ctx_sync(EVP_CIPHER_CTX *ctx, sec_cipher_priv_ctx_syncto_t direction) { cipher_priv_ctx_t* priv_ctx = NULL; unsigned int num = 0; unsigned int offset = 0; US_DEBUG("sw hw state sync start. ctx=%p", ctx); priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_ERR("cipher priv ctx data is NULL."); return KAE_FAIL; } if (direction == SEC_CIHPER_SYNC_S2W) { kae_memcpy(priv_ctx->iv, EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); num = EVP_CIPHER_CTX_num(ctx); if (num) { sec_ciphers_ctr_iv_sub(priv_ctx->iv); } priv_ctx->offset = num; priv_ctx->left_len = 0; } else { if (priv_ctx->do_cipher_len != 0) { offset = priv_ctx->offset; kae_memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), priv_ctx->iv, EVP_CIPHER_CTX_iv_length(ctx)); EVP_CIPHER_CTX_set_num(ctx, offset); } } US_DEBUG("state sync success, direct=%d[1:SW_TO_HW, 2:HW_TO_SW], offset=%d", direction, num); return KAE_SUCCESS; } int sec_ciphers_ecb_encryt(xts_ecb_data* ecb_encryto, uint8_t* buf_out, uint8_t* buf_in, int buf_len) { int out_len1, tmplen; /* Encrypt */ if (!EVP_EncryptInit_ex(ecb_encryto->ecb_ctx, ecb_encryto->cipher_type, NULL, ecb_encryto->key2, NULL)) { US_ERR("EVP_EncryptInit failed.\n"); return KAE_FAIL; } EVP_CIPHER_CTX_set_padding(ecb_encryto->ecb_ctx, 0); if (!EVP_EncryptUpdate(ecb_encryto->ecb_ctx, buf_out, &out_len1, buf_in, buf_len)) { US_ERR("EVP_EncryptUpdate failed.\n"); return KAE_FAIL; } if (!EVP_EncryptFinal_ex(ecb_encryto->ecb_ctx, buf_out + out_len1, &tmplen)) { /* Error */ return KAE_FAIL; } out_len1 += tmplen; return KAE_SUCCESS; } int sec_ciphers_ecb_decrypt(xts_ecb_data* ecb_encryto, uint8_t* buf_out, uint8_t* buf_in, int buf_len) { int out_len1, tmplen; /* decrypt */ if (!EVP_DecryptInit_ex(ecb_encryto->ecb_ctx, ecb_encryto->cipher_type, NULL, ecb_encryto->key2, NULL)) { US_ERR("EVP_EncryptInit failed.\n"); return KAE_FAIL; } EVP_CIPHER_CTX_set_padding(ecb_encryto->ecb_ctx, 0); if (!EVP_DecryptUpdate(ecb_encryto->ecb_ctx, buf_out, &out_len1, buf_in, buf_len)) { US_ERR("EVP_EncryptUpdate failed.\n"); return KAE_FAIL; } if (!EVP_DecryptFinal_ex(ecb_encryto->ecb_ctx, buf_out + out_len1, &tmplen)) { /* Error */ return KAE_FAIL; } out_len1 += tmplen; return KAE_SUCCESS; } KAE/alg/ciphers/sec_ciphers.c0000644060212406010010000005565213616500010013233 0ustar /* * Copyright (C) 2019. Huawei Technologies Co.,Ltd.All rights reserved. * * Description: This file provides the implemenation for KAE engine ciphers * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /***************************************************************************** * @file sec_ciphers.c * * This file provides the implemenation for ciphers * *****************************************************************************/ #include "sec_ciphers.h" #include "sec_ciphers_soft.h" #include "sec_ciphers_utils.h" #include "sec_ciphers_wd.h" #include "engine_check.h" #include "engine_types.h" #include "engine_utils.h" #include "async_callback.h" #include "async_event.h" #include "async_task_queue.h" #define INPUT_CACHE_SIZE (256 * 1024) struct cipher_info { int nid; int blocksize; int keylen; int ivlen; int flags; EVP_CIPHER *cipher; }; typedef struct cipher_info cipher_info_t; static cipher_info_t g_sec_ciphers_info[] = { {NID_aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, NULL}, {NID_aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, NULL}, {NID_aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, NULL}, {NID_aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, NULL}, {NID_aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, NULL}, {NID_aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, NULL}, {NID_aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, NULL}, {NID_aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, NULL}, {NID_aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, NULL}, {NID_aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, NULL}, {NID_aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, NULL}, {NID_sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, NULL}, {NID_sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1, NULL}, }; #define CIPHERS_COUNT (BLOCKSIZES_OF(g_sec_ciphers_info)) static int g_known_cipher_nids[CIPHERS_COUNT] = { NID_aes_128_ecb, NID_aes_192_ecb, NID_aes_256_ecb, NID_aes_128_cbc, NID_aes_192_cbc, NID_aes_256_cbc, NID_aes_128_ctr, NID_aes_192_ctr, NID_aes_256_ctr, NID_aes_128_xts, NID_aes_256_xts, NID_sm4_ctr, NID_sm4_cbc, }; #define SEC_CIPHERS_RETURN_FAIL_IF(cond, mesg, ret)\ if (unlikely(cond)) {\ US_ERR(mesg);\ return (ret);\ }\ static int sec_ciphers_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int encrypt); static int sec_ciphers_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl); static int sec_ciphers_cleanup(EVP_CIPHER_CTX *ctx); static int sec_ciphers_priv_ctx_cleanup(EVP_CIPHER_CTX *ctx); static int sec_ciphers_is_check_valid(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx); static int sec_ciphers_async_do_crypto(cipher_engine_ctx_t *e_cipher_ctx, op_done_t *op_done); static int sec_ciphers_sync_do_crypto(EVP_CIPHER_CTX *ctx, cipher_engine_ctx_t *e_cipher_ctx, cipher_priv_ctx_t *priv_ctx); static int sec_ciphers_init_priv_ctx(cipher_priv_ctx_t *priv_ctx, EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv) { int nid = 0; int ret = KAE_FAIL; if (unlikely(ctx == NULL)) { US_ERR("ctx is NULL"); return ret; } if (unlikely(priv_ctx == NULL)) { US_ERR("priv_ctx is NULL"); goto ERR; } // init encrypt of private ctx priv_ctx->encrypt = EVP_CIPHER_CTX_encrypting(ctx); // init offset of private ctx priv_ctx->offset = 0; // init key of private ctx if (priv_ctx->key == NULL) { priv_ctx->key = (uint8_t 
*)kae_malloc(EVP_CIPHER_CTX_key_length(ctx)); if (unlikely(priv_ctx->key == NULL)) { US_ERR("malloc key failed."); goto ERR; } } kae_memcpy(priv_ctx->key, key, EVP_CIPHER_CTX_key_length(ctx)); priv_ctx->key_len = EVP_CIPHER_CTX_key_length(ctx); // init iv of private ctx if (priv_ctx->iv == NULL) { priv_ctx->iv = (uint8_t *)kae_malloc(EVP_CIPHER_CTX_iv_length(ctx)); if (unlikely(priv_ctx->iv == NULL)) { US_ERR("malloc iv failed."); goto ERR; } } if (iv != NULL) { kae_memcpy(priv_ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); } else { kae_memcpy(priv_ctx->iv, EVP_CIPHER_CTX_iv_noconst(ctx), EVP_CIPHER_CTX_iv_length(ctx)); } priv_ctx->iv_len = EVP_CIPHER_CTX_iv_length(ctx); if (priv_ctx->next_iv == NULL) { priv_ctx->next_iv = (uint8_t *)kae_malloc(priv_ctx->iv_len); if (unlikely(priv_ctx->next_iv == NULL)) { US_ERR("malloc iv failed."); return KAE_FAIL; } } // init cipher mode and alg of private ctx nid = EVP_CIPHER_CTX_nid(ctx); priv_ctx->c_mode = sec_ciphers_get_cipher_mode(nid); priv_ctx->c_alg = sec_ciphers_get_cipher_alg(nid); if (unlikely((priv_ctx->c_mode == NO_C_MODE) || (priv_ctx->c_alg == NO_C_ALG))) { US_ERR("don't support the cipher nid=%d, alg=%d, mode=%d", nid, priv_ctx->c_alg, priv_ctx->c_mode); goto ERR; } priv_ctx->ecb_encryto = NULL; #ifndef OPENSSL_ENABLE_KAE_SMALL_PACKKET_CIPHER_OFFLOADS ret = sec_ciphers_sw_impl_init(ctx, key, iv, priv_ctx->encrypt); if (ret != KAE_SUCCESS) { US_ERR("kae sw iml init failed. ret = %d", ret); goto ERR; } priv_ctx->switch_threshold = (size_t)sec_ciphers_sw_get_threshold(EVP_CIPHER_CTX_nid(ctx)); #endif return KAE_SUCCESS; ERR: US_ERR("sec_ciphers_sec_state_init failed. ctx=%p", ctx); (void)sec_ciphers_priv_ctx_cleanup(ctx); return KAE_FAIL; } static int sec_ciphers_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int encrypt) { cipher_priv_ctx_t *priv_ctx = NULL; if (unlikely((ctx == NULL) || (key == NULL))) { US_ERR("ctx or key is NULL."); return OPENSSL_FAIL; } if (encrypt != EVP_CIPHER_CTX_encrypting(ctx)) { US_ERR("encrypt different, ctx=%p", ctx); return OPENSSL_FAIL; } priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_ERR("sec private ctx is NULL"); return OPENSSL_FAIL; } if (KAE_SUCCESS != sec_ciphers_init_priv_ctx(priv_ctx, ctx, key, iv)) { US_ERR("init failed. 
ctx=%p", ctx); goto ERR; } US_DEBUG("init success, ctx=%p", ctx); #ifdef KAE_DEBUG_KEY_ENABLE dump_data("key", priv_ctx->key, priv_ctx->key_len); dump_data("iv", priv_ctx->iv, priv_ctx->iv_len); #endif return OPENSSL_SUCCESS; ERR: sec_ciphers_cleanup(ctx); return OPENSSL_SUCCESS; } static void sec_ciphers_update_priv_ctx(cipher_priv_ctx_t *priv_ctx) { uint32_t do_cipher_len = priv_ctx->do_cipher_len; uint32_t increase_counter = 0; if (do_cipher_len == 0) { return; } priv_ctx->in += priv_ctx->do_cipher_len; priv_ctx->out += priv_ctx->do_cipher_len; priv_ctx->left_len -= priv_ctx->do_cipher_len; switch (priv_ctx->c_mode) { case ECB: break; case CBC: if (priv_ctx->encrypt == OPENSSL_ENCRYPTION) { kae_memcpy(priv_ctx->iv, priv_ctx->out - 16, 16); // hardware need 16-byte alignment } else { kae_memcpy(priv_ctx->iv, priv_ctx->next_iv, 16); // hardware need 16-byte alignment } break; case CTR: increase_counter = (do_cipher_len + priv_ctx->offset) >> 4; // right shift 4 sec_ciphers_ctr_iv_inc(priv_ctx->iv, increase_counter); priv_ctx->offset = (priv_ctx->offset + (do_cipher_len & 0xf)) % 16; // hardware need 16-byte alignment break; case XTS: if (priv_ctx->c_alg == AES) { priv_ctx->ecb_encryto->countNum = (priv_ctx->do_cipher_len + priv_ctx->offset) >> 4; // right shift 4 sec_ciphers_xts_iv_inc(priv_ctx); priv_ctx->offset = (priv_ctx->offset + (do_cipher_len & 0xf)) % 16; // hardware need 16-byte alignment } break; default: US_WARN("mode=%d don't support.", priv_ctx->c_mode); break; } US_DEBUG("update priv_ctx success."); return; } static int sec_ciphers_before_dociphers_cb(cipher_priv_ctx_t *priv_ctx) { // store IV for next cbc decryption operation if (priv_ctx->encrypt == OPENSSL_DECRYPTION && priv_ctx->c_mode == CBC) { kae_memcpy(priv_ctx->next_iv, priv_ctx->in + priv_ctx->do_cipher_len - priv_ctx->iv_len, priv_ctx->iv_len); } if (priv_ctx->c_mode == XTS && priv_ctx->c_alg == AES) { if (priv_ctx->ecb_encryto == NULL) { // set XTS PARAM priv_ctx->ecb_encryto = (xts_ecb_data *)kae_malloc(sizeof(xts_ecb_data)); priv_ctx->ecb_encryto->ecb_ctx = EVP_CIPHER_CTX_new(); priv_ctx->ecb_encryto->key2_len = priv_ctx->key_len >> 1; priv_ctx->ecb_encryto->key2 = (uint8_t *)kae_malloc(priv_ctx->key_len >> 1); priv_ctx->ecb_encryto->encryto_iv = (uint8_t *)kae_malloc(priv_ctx->iv_len); priv_ctx->ecb_encryto->iv_out = (uint8_t *)kae_malloc(priv_ctx->iv_len); if (priv_ctx->ecb_encryto == NULL || priv_ctx->ecb_encryto->ecb_ctx == NULL || priv_ctx->ecb_encryto->key2 == NULL || priv_ctx->ecb_encryto->encryto_iv == NULL || priv_ctx->ecb_encryto->iv_out == NULL) { return KAE_FAIL; } if (priv_ctx->ecb_encryto->key2_len == 32) { // 256-xts key2len is 32 priv_ctx->ecb_encryto->cipher_type = EVP_aes_256_ecb(); } else { priv_ctx->ecb_encryto->cipher_type = EVP_aes_128_ecb(); } priv_ctx->ecb_encryto->countNum = 0; kae_memcpy(priv_ctx->ecb_encryto->key2, priv_ctx->key + priv_ctx->ecb_encryto->key2_len, priv_ctx->ecb_encryto->key2_len); } sec_ciphers_ecb_encryt(priv_ctx->ecb_encryto, priv_ctx->ecb_encryto->encryto_iv, priv_ctx->iv, priv_ctx->iv_len); } return KAE_SUCCESS; } static int sec_ciphers_after_dociphers_cb(EVP_CIPHER_CTX *ctx) { // sync priv ctx to next cipher, in case next cipher may be soft cipher return sec_ciphers_sw_hw_ctx_sync(ctx, SEC_CIHPER_SYNC_H2S); } /* * |<--16*n bytes--> |<----16*n bytes------->|<--16*n bytes--->| * |-----------------|<--offset----->|<----->|-----------------| * |<--first cipher----------------->|<---next cipher--------->| * * * to make 16*n align to next cipher data copy to hardware 
addr should start at * hardware_addr+offset and get out put at hardware_addr+offset * * |<----16*n bytes------>|<--16*n bytes--->| * |<--offset----->|------------------------+ * hardware_addr |<---next cipher-------->| * */ static int sec_ciphers_do_crypto(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) { int ret = KAE_FAIL; // add async parm int job_ret; op_done_t op_done; SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx == NULL, "priv_ctx is NULL.", KAE_FAIL); cipher_engine_ctx_t *e_cipher_ctx = priv_ctx->e_cipher_ctx; SEC_CIPHERS_RETURN_FAIL_IF(e_cipher_ctx == NULL, "e_cipher_ctx is NULL", KAE_FAIL); SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx->inl <= 0, "in length less than or equal to zero.", KAE_FAIL); // packageSize>input_cache_size if (priv_ctx->left_len > INPUT_CACHE_SIZE - priv_ctx->offset) { ret = sec_ciphers_sync_do_crypto(ctx, e_cipher_ctx, priv_ctx); if (ret != 0) { US_ERR("sec sync crypto fail"); return ret; } return KAE_SUCCESS; } // async async_init_op_done(&op_done); if (op_done.job != NULL && kae_is_async_enabled()) { if (async_setup_async_event_notification(0) == 0) { US_ERR("sec async event notifying failed"); async_cleanup_op_done(&op_done); return KAE_FAIL; } } else { US_DEBUG("NO ASYNC Job or async disable, back to SYNC!"); async_cleanup_op_done(&op_done); return sec_ciphers_sync_do_crypto(ctx, e_cipher_ctx, priv_ctx); } if (sec_ciphers_async_do_crypto(e_cipher_ctx, &op_done) == KAE_FAIL) goto err; do { job_ret = async_pause_job(op_done.job, ASYNC_STATUS_OK); if ((job_ret == 0)) { US_DEBUG("- pthread_yidle -"); kae_pthread_yield(); } } while (!op_done.flag || ASYNC_CHK_JOB_RESUMED_UNEXPECTEDLY(job_ret)); if (op_done.verifyRst < 0) { US_ERR("verify result failed with %d", op_done.verifyRst); async_cleanup_op_done(&op_done); return KAE_FAIL; } async_cleanup_op_done(&op_done); US_DEBUG(" Cipher Async Job Finish! priv_ctx = %p\n", priv_ctx); // after cipher cycle should update: in, out, iv, key, length. sec_ciphers_update_priv_ctx(priv_ctx); (void)sec_ciphers_after_dociphers_cb(ctx); return KAE_SUCCESS; err: US_ERR("async job err"); (void)async_clear_async_event_notification(); async_cleanup_op_done(&op_done); return KAE_FAIL; } static int sec_ciphers_sync_do_crypto(EVP_CIPHER_CTX *ctx, cipher_engine_ctx_t *e_cipher_ctx, cipher_priv_ctx_t *priv_ctx) { int ret = KAE_FAIL; int leftlen = priv_ctx->left_len; while (leftlen != 0) { priv_ctx->do_cipher_len = wd_ciphers_get_do_cipher_len(priv_ctx->offset, leftlen); (void)sec_ciphers_before_dociphers_cb(e_cipher_ctx->priv_ctx); wd_ciphers_set_input_data(e_cipher_ctx); ret = wd_ciphers_do_crypto_impl(e_cipher_ctx); if (ret != KAE_SUCCESS) { return ret; } wd_ciphers_get_output_data(e_cipher_ctx); // after cipher cycle should update: in, out, iv, key, length. 
sec_ciphers_update_priv_ctx(priv_ctx); (void)sec_ciphers_after_dociphers_cb(ctx); leftlen -= priv_ctx->do_cipher_len; } US_DEBUG("sec state update success."); return KAE_SUCCESS; } static int sec_ciphers_async_do_crypto(cipher_engine_ctx_t *e_cipher_ctx, op_done_t *op_done) { int ret = 0; int cnt = 0; cipher_priv_ctx_t *priv_ctx = e_cipher_ctx->priv_ctx; enum task_type type = ASYNC_TASK_CIPHER; void *tag = e_cipher_ctx; priv_ctx->do_cipher_len = wd_ciphers_get_do_cipher_len(priv_ctx->offset, priv_ctx->left_len); (void)sec_ciphers_before_dociphers_cb(e_cipher_ctx->priv_ctx); wd_ciphers_set_input_data(e_cipher_ctx); do { if (cnt > MAX_SEND_TRY_CNTS) { break; } ret = wcrypto_do_cipher(e_cipher_ctx->wd_ctx, &e_cipher_ctx->op_data, tag); if (ret == -WD_EBUSY) { if ((async_wake_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0 || async_pause_job(op_done->job, ASYNC_STATUS_EAGAIN) == 0)) { US_ERR("sec wake job or sec pause job fail!\n"); ret = 0; break; } cnt++; } } while (ret == -WD_EBUSY); if (ret != WD_SUCCESS) { US_ERR("sec async wcryto do cipher failed"); return KAE_FAIL; } if (async_add_poll_task(e_cipher_ctx, op_done, type) == 0) { US_ERR("sec add task failed "); return KAE_FAIL; } return KAE_SUCCESS; } static int sec_ciphers_is_check_valid(EVP_CIPHER_CTX *ctx, cipher_priv_ctx_t *priv_ctx) { if (priv_ctx->switch_threshold > (size_t)priv_ctx->inl) { US_WARN_LIMIT("small packet cipher offload, switch to soft cipher, inl %d", (int)priv_ctx->inl); return KAE_FAIL; } if (sec_ciphers_is_iv_may_overflow(ctx, priv_ctx)) { US_WARN("sec do cipher, the iv will overflow"); return KAE_FAIL; } return KAE_SUCCESS; } static int sec_ciphers_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) { int ret = KAE_FAIL; int num = 0; cipher_priv_ctx_t *priv_ctx = NULL; SEC_CIPHERS_RETURN_FAIL_IF(ctx == NULL, "ctx is NULL", OPENSSL_FAIL); SEC_CIPHERS_RETURN_FAIL_IF(in == NULL, "in is NULL", OPENSSL_FAIL); SEC_CIPHERS_RETURN_FAIL_IF(out == NULL, "out is NULL", OPENSSL_FAIL); priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); SEC_CIPHERS_RETURN_FAIL_IF(priv_ctx == NULL, "ctx cipher data is NULL.", OPENSSL_FAIL); priv_ctx->inl = inl; priv_ctx->in = in; priv_ctx->out = out; priv_ctx->left_len = inl; num = EVP_CIPHER_CTX_num(ctx); ret = sec_ciphers_is_check_valid(ctx, priv_ctx); if (ret != KAE_SUCCESS) { US_WARN_LIMIT("sec cipher check invalid, switch to soft cipher"); goto do_soft_cipher; } if (priv_ctx->e_cipher_ctx == NULL) { priv_ctx->e_cipher_ctx = wd_ciphers_get_engine_ctx(priv_ctx); if (priv_ctx->e_cipher_ctx == NULL) { US_WARN("failed to get engine ctx, switch to soft cipher"); goto do_soft_cipher; } } ret = sec_ciphers_do_crypto(ctx, priv_ctx); if (ret != KAE_SUCCESS) { US_WARN("sec cipher do ciphers failed, switch to soft cipher"); goto do_soft_cipher; } US_DEBUG("do cipher success. 
ctx=%p, ctx->num=%d, inl=%d", ctx, num, (int)inl); return OPENSSL_SUCCESS; do_soft_cipher: if (priv_ctx->e_cipher_ctx != NULL) { wd_ciphers_put_engine_ctx(priv_ctx->e_cipher_ctx); priv_ctx->e_cipher_ctx = NULL; } if (sec_ciphers_software_encrypt(ctx, priv_ctx) != KAE_SUCCESS) { US_WARN("sec cipher do soft ciphers failed"); return OPENSSL_FAIL; } return OPENSSL_SUCCESS; } static int sec_ciphers_priv_ctx_cleanup(EVP_CIPHER_CTX *ctx) { cipher_priv_ctx_t *priv_ctx = NULL; priv_ctx = (cipher_priv_ctx_t *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(priv_ctx == NULL)) { US_WARN("ctx cipher data is NULL."); return KAE_FAIL; } kae_free(priv_ctx->iv); kae_free(priv_ctx->key); kae_free(priv_ctx->next_iv); (void)wd_ciphers_put_engine_ctx(priv_ctx->e_cipher_ctx); priv_ctx->e_cipher_ctx = NULL; return KAE_SUCCESS; } static int sec_ciphers_cleanup(EVP_CIPHER_CTX *ctx) { if (unlikely(ctx == NULL)) { US_WARN("ctx is NULL"); return OPENSSL_FAIL; } int ret = sec_ciphers_sw_impl_cleanup(ctx); if (ret != KAE_SUCCESS) { US_ERR("Cipher soft impl cleanup failed. ctx=%p", ctx); } ret = sec_ciphers_priv_ctx_cleanup(ctx); if (ret != KAE_SUCCESS) { return OPENSSL_FAIL; } US_DEBUG("Cleanup success, ctx=%p", ctx); return OPENSSL_SUCCESS; } static EVP_CIPHER *sec_ciphers_set_cipher_method(cipher_info_t cipherinfo) { int ret = 1; EVP_CIPHER *cipher = EVP_CIPHER_meth_new(cipherinfo.nid, cipherinfo.blocksize, cipherinfo.keylen); if (cipher == NULL) { return NULL; } ret &= EVP_CIPHER_meth_set_iv_length(cipher, cipherinfo.ivlen); ret &= EVP_CIPHER_meth_set_flags(cipher, cipherinfo.flags); ret &= EVP_CIPHER_meth_set_init(cipher, sec_ciphers_init); ret &= EVP_CIPHER_meth_set_do_cipher(cipher, sec_ciphers_do_cipher); ret &= EVP_CIPHER_meth_set_set_asn1_params(cipher, EVP_CIPHER_set_asn1_iv); ret &= EVP_CIPHER_meth_set_get_asn1_params(cipher, EVP_CIPHER_get_asn1_iv); ret &= EVP_CIPHER_meth_set_cleanup(cipher, sec_ciphers_cleanup); ret &= EVP_CIPHER_meth_set_impl_ctx_size(cipher, sizeof(cipher_priv_ctx_t)); if (ret == 0) { US_WARN("Failed to set cipher methods for nid %d\n", cipherinfo.nid); return NULL; } else { return cipher; } } void sec_create_ciphers(void) { unsigned int i = 0; for (i = 0; i < CIPHERS_COUNT; i++) { if (g_sec_ciphers_info[i].cipher == NULL) { g_sec_ciphers_info[i].cipher = sec_ciphers_set_cipher_method(g_sec_ciphers_info[i]); } } } /****************************************************************************** * function: * sec_engine_ciphers(ENGINE *e, * const EVP_CIPHER **cipher, * const int **nids, * int nid) * * @param e[IN] - OpenSSL engine pointer * @param cipher[IN] - cipher structure pointer * @param nids[IN] - cipher function nids * @param nid[IN] - cipher operation id * * description: * kae engine cipher operations registrar ******************************************************************************/ int sec_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) { UNUSED(e); unsigned int i = 0; if (unlikely((nids == NULL) && ((cipher == NULL) || (nid < 0)))) { US_WARN("Invalid input param."); if (cipher != NULL) { *cipher = NULL; } return OPENSSL_FAIL; } /* No specific cipher => return a list of supported nids ... 
*/ if (cipher == NULL) { if (nids != NULL) { *nids = g_known_cipher_nids; } return BLOCKSIZES_OF(g_sec_ciphers_info); } for (i = 0; i < CIPHERS_COUNT; i++) { if (g_sec_ciphers_info[i].nid == nid) { if (g_sec_ciphers_info[i].cipher == NULL) { sec_create_ciphers(); } *cipher = g_sec_ciphers_info[i].cipher; return OPENSSL_SUCCESS; } } US_WARN("nid = %d not support.", nid); *cipher = NULL; return OPENSSL_FAIL; } void sec_ciphers_free_ciphers(void) { unsigned int i = 0; for (i = 0; i < CIPHERS_COUNT; i++) { if (g_sec_ciphers_info[i].cipher != NULL) { EVP_CIPHER_meth_free(g_sec_ciphers_info[i].cipher); g_sec_ciphers_info[i].cipher = NULL; } } } void sec_ciphers_cb(const void *msg, void *tag) { if (!msg || !tag) { US_ERR("sec cb params err!\n"); return; } struct wcrypto_cipher_msg *message = (struct wcrypto_cipher_msg *)msg; cipher_engine_ctx_t *eng_ctx = (cipher_engine_ctx_t *)tag; kae_memcpy(eng_ctx->priv_ctx->out, message->out, message->out_bytes); } // async poll thread create int sec_cipher_engine_ctx_poll(void *engnine_ctx) { int ret = 0; struct cipher_engine_ctx *eng_ctx = (struct cipher_engine_ctx *)engnine_ctx; struct wd_queue *q = eng_ctx->q_node->kae_wd_queue; POLL_AGAIN: ret = wcrypto_cipher_poll(q, 1); if (!ret) { goto POLL_AGAIN; } else if (ret < 0) { US_ERR("cipher poll failed\n"); return ret; } return ret; } int cipher_module_init(void) { wd_ciphers_init_qnode_pool(); sec_create_ciphers(); // reg async interface here async_register_poll_fn(ASYNC_TASK_CIPHER, sec_cipher_engine_ctx_poll); return 1; } 07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!731 blocks
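For context, a hedged usage sketch (not shipped in this archive) of how an application might select the engine built by this package for AES-128-CBC through the standard OpenSSL 1.1 ENGINE API. The engine id "kae", the zeroed key/IV, and the dummy plaintext are assumptions for illustration; reaching the engine by id also assumes the shared object is installed where OpenSSL looks for engines (the spec above installs it under /usr/local/lib/engines-1.1).

#include <string.h>
#include <openssl/engine.h>
#include <openssl/evp.h>

int main(void)
{
    unsigned char key[16] = {0}, iv[16] = {0};   /* placeholder key/IV */
    unsigned char in[48], out[48 + 32];
    int outl = 0, tmplen = 0;

    memset(in, 'A', sizeof(in));                 /* three AES blocks of dummy data */

    ENGINE_load_builtin_engines();
    ENGINE *e = ENGINE_by_id("kae");             /* assumed engine id */
    if (e == NULL || !ENGINE_init(e))
        return 1;                                /* engine not installed or unusable */

    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    if (ctx == NULL ||
        !EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), e, key, iv) ||
        !EVP_EncryptUpdate(ctx, out, &outl, in, sizeof(in)) ||
        !EVP_EncryptFinal_ex(ctx, out + outl, &tmplen))
        return 1;

    EVP_CIPHER_CTX_free(ctx);
    ENGINE_finish(e);
    ENGINE_free(e);
    return 0;
}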