path: root/security/ktls_isa-l_crypto-kmod
author:    Mark Linimon <linimon@FreeBSD.org>  2019-09-29 01:09:57 +0000
committer: Mark Linimon <linimon@FreeBSD.org>  2019-09-29 01:09:57 +0000
commit:    834e1d8c3555c1a7327be7fe0f08e8b12a7d8a15 (patch)
tree:      a0358b38513a0b81850670469c0468fc4cbb846d /security/ktls_isa-l_crypto-kmod
parent:    768af6de1c4120c7c561eb038d1d8c6cd073f531 (diff)
Kernel module containing a KTLS software backend for AES-GCM connections
using Intel's ISA-L crypto library. Only for amd64 and only for very recent
-CURRENT.

Submitted by:	jhb
Reviewed by:	gallatin
Differential Revision:	D21446

Notes:
    svn path=/head/; revision=513187
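For context, a minimal usage sketch (not part of this commit): build and load
the module, then inspect the knobs it adds. The port path, module name, and
sysctl names are taken from the Makefile and sysctl definitions in the diff
below; enabling KTLS itself (e.g. the base system's kern.ipc.tls.enable) is
outside this port and assumed to be configured separately.

    cd /usr/ports/security/ktls_isa-l_crypto-kmod   # port added by this commit
    make install clean                              # builds ktls_intel-isa-l.ko (needs devel/yasm)
    kldload ktls_intel-isa-l                        # registers the "Intel ISA-L" KTLS backend
    sysctl kern.ipc.tls.isa_gcm                     # 1 = backend may claim AES-GCM sessions (default)
    sysctl kern.ipc.tls.stats.isa_aead_crypts       # AEAD encrypt counter added by this module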
Diffstat (limited to 'security/ktls_isa-l_crypto-kmod')
-rw-r--r--  security/ktls_isa-l_crypto-kmod/Makefile                40
-rw-r--r--  security/ktls_isa-l_crypto-kmod/distinfo                 3
-rw-r--r--  security/ktls_isa-l_crypto-kmod/files/Makefile          64
-rw-r--r--  security/ktls_isa-l_crypto-kmod/files/intelisa_kern.c  401
-rw-r--r--  security/ktls_isa-l_crypto-kmod/pkg-descr                8
5 files changed, 516 insertions, 0 deletions
diff --git a/security/ktls_isa-l_crypto-kmod/Makefile b/security/ktls_isa-l_crypto-kmod/Makefile
new file mode 100644
index 000000000000..32719c632a65
--- /dev/null
+++ b/security/ktls_isa-l_crypto-kmod/Makefile
@@ -0,0 +1,40 @@
+# Created by: Michal Bielicki <m.bielicki@llizardfs.com>
+# $FreeBSD$
+
+PORTNAME= isa-l_crypto
+PORTVERSION= 2.21.0
+DISTVERSIONPREFIX= v
+CATEGORIES= security
+PKGNAMEPREFIX= ktls_
+PKGNAMESUFFIX= -kmod
+
+MAINTAINER= gallatin@FreeBSD.org
+COMMENT= KTLS module using Intel(R) ISA-L crypto
+
+LICENSE= BSD3CLAUSE
+LICENSE_FILE= ${WRKSRC}/LICENSE
+
+ONLY_FOR_ARCHS= amd64
+
+BUILD_DEPENDS= yasm:devel/yasm
+
+USES= kmod uidfix
+
+USE_GITHUB= yes
+GH_ACCOUNT= 01org
+
+MAKE_ENV+= ISASRC=${WRKSRC}
+
+PLIST_FILES= ${KMODDIR}/ktls_intel-isa-l.ko
+
+.include <bsd.port.pre.mk>
+
+.if !exists(${SRC_BASE}/sys/sys/ktls.h)
+IGNORE= requires KTLS support
+.endif
+
+post-extract:
+ ${CP} ${FILESDIR}/* ${WRKSRC}/
+ ${ECHO} "#include <sys/stdint.h>" > ${WRKSRC}/include/stdint.h
+
+.include <bsd.port.post.mk>
diff --git a/security/ktls_isa-l_crypto-kmod/distinfo b/security/ktls_isa-l_crypto-kmod/distinfo
new file mode 100644
index 000000000000..323c470f4e5d
--- /dev/null
+++ b/security/ktls_isa-l_crypto-kmod/distinfo
@@ -0,0 +1,3 @@
+TIMESTAMP = 1566856170
+SHA256 (01org-isa-l_crypto-v2.21.0_GH0.tar.gz) = 58284d7e5e60c37d6d4c1bbc2a876e95d14ca153443775a491f91c21a4243171
+SIZE (01org-isa-l_crypto-v2.21.0_GH0.tar.gz) = 502413
diff --git a/security/ktls_isa-l_crypto-kmod/files/Makefile b/security/ktls_isa-l_crypto-kmod/files/Makefile
new file mode 100644
index 000000000000..8761ea789fa8
--- /dev/null
+++ b/security/ktls_isa-l_crypto-kmod/files/Makefile
@@ -0,0 +1,64 @@
+#
+# Copyright (c) 2016-2019 Netflix, Inc
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer,
+# without modification.
+# 2. Redistributions in binary form must reproduce at minimum a disclaimer
+# similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+# redistribution must be conditioned upon including a substantially
+# similar Disclaimer requirement for further binary redistribution.
+#
+# NO WARRANTY
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGES.
+#
+# $FreeBSD$
+#
+
+ISAINC=${ISASRC}/include
+ISAAES=${ISASRC}/aes
+INCS=-I${ISAINC} -I${ISAAES}
+CFLAGS+=${INCS}
+YASM=${LOCALBASE}/bin/yasm
+
+.PATH: ${ISASRC}/aes
+.SUFFIXES: .asm
+.asm.o:
+ ${YASM} -g dwarf2 -f elf64 ${INCS} -o ${.TARGET} ${.IMPSRC}
+
+KMOD= ktls_intel-isa-l
+SRCS= gcm128_avx_gen2.asm \
+ gcm128_avx_gen4.asm \
+ gcm128_sse.asm \
+ gcm256_avx_gen2.asm \
+ gcm256_avx_gen4.asm \
+ gcm256_sse.asm \
+ gcm_multibinary.asm \
+ gcm128_avx_gen2_nt.asm \
+ gcm128_avx_gen4_nt.asm \
+ gcm128_sse_nt.asm \
+ gcm256_avx_gen2_nt.asm \
+ gcm256_avx_gen4_nt.asm \
+ gcm256_sse_nt.asm \
+ gcm_multibinary_nt.asm \
+ keyexp_128.asm \
+ keyexp_192.asm \
+ keyexp_256.asm \
+ keyexp_multibinary.asm \
+ gcm_pre.c \
+ intelisa_kern.c
+
+.include <bsd.kmod.mk>
diff --git a/security/ktls_isa-l_crypto-kmod/files/intelisa_kern.c b/security/ktls_isa-l_crypto-kmod/files/intelisa_kern.c
new file mode 100644
index 000000000000..fd846724a00d
--- /dev/null
+++ b/security/ktls_isa-l_crypto-kmod/files/intelisa_kern.c
@@ -0,0 +1,401 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2014-2018 Netflix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/ktls.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sockbuf.h>
+#include <sys/filedesc.h>
+#include <sys/sysctl.h>
+#include <sys/counter.h>
+#include <sys/uio.h>
+#include <sys/module.h>
+#include <opencrypto/xform.h>
+#include <machine/fpu.h>
+
+#include "aes_gcm.h"
+
+#define KTLS_INTELISA_AEAD_TAGLEN 16
+
+struct isa_gcm_struct {
+ struct gcm_key_data key_data;
+ struct gcm_context_data ctx_data;
+ void (*gcm_pre) (const void *key, struct gcm_key_data *); /* Done once per key */
+ void (*gcm_init) (const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *iv,
+ uint8_t const *aad,
+ uint64_t aad_len); /* Done at start of crypt */
+ void (*gcm_upd) (const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t len); /* With each block of data */
+ void (*gcm_upd_nt) (const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t len); /* With each block of data */
+ void (*gcm_final) (const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *tag,
+ uint64_t tag_len); /* Pulls out the tag */
+};
+
+SYSCTL_DECL(_kern_ipc_tls);
+
+static int ktls_use_intel_isa_gcm = 1;
+SYSCTL_INT(_kern_ipc_tls, OID_AUTO, isa_gcm, CTLFLAG_RW,
+ &ktls_use_intel_isa_gcm, 1,
+ "Should we use the Intel ISA GCM if available");
+
+SYSCTL_DECL(_kern_ipc_tls_stats);
+
+static counter_u64_t ktls_offload_isa_aead;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aead_crypts,
+ CTLFLAG_RD, &ktls_offload_isa_aead,
+ "Total number of Intel ISA TLS AEAD encrypts called");
+
+static counter_u64_t ktls_offload_isa_tls_13;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_tls_13_crypts,
+ CTLFLAG_RD, &ktls_offload_isa_tls_13,
+ "Total number of Intel ISA TLS 1.3 encrypts called");
+
+static counter_u64_t ktls_offload_isa_tls_12;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_tls_12_crypts,
+ CTLFLAG_RD, &ktls_offload_isa_tls_12,
+ "Total number of Intel ISA TLS 1.2 encrypts called");
+
+static counter_u64_t intelisa_unaligned_mem_b;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_unaligned_bytes,
+ CTLFLAG_RD, &intelisa_unaligned_mem_b,
+ "Byte cnt of intel isa unaligned");
+
+static counter_u64_t intelisa_aligned_mem_b;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aligned_bytes,
+ CTLFLAG_RD, &intelisa_aligned_mem_b,
+ "Byte cnt of intel isa aligned");
+
+static counter_u64_t intelisa_unaligned_mem;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_unaligned,
+ CTLFLAG_RD, &intelisa_unaligned_mem,
+ "Call cnt of intel isa unaligned");
+
+static counter_u64_t intelisa_aligned_mem;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, isa_aligned,
+ CTLFLAG_RD, &intelisa_aligned_mem,
+ "Call cnt of intel isa aligned");
+
+static MALLOC_DEFINE(M_INTEL_ISA, "isal_tls", "Intel ISA-L TLS");
+
+static int
+intel_isa_seal(struct isa_gcm_struct *isa,
+ struct iovec *outiov, int numiovs,
+ uint8_t *static_iv, int iv_len, uint64_t seq,
+ struct iovec *iniov,
+ uint8_t * ad, int adlen,
+ uint8_t * tagout, size_t *taglen,
+ bool tls_13, uint8_t tls_13_rtype)
+{
+ int i;
+ bool nt = true;
+ bool misaligned_len, misaligned_start;
+ int fixup = 0;
+ size_t offset;
+ uint8_t *in;
+ uint8_t *out;
+ uint64_t len;
+ uint8_t iv[32];
+ uint8_t seq_num[sizeof(seq)];
+
+ if (iv_len > 32 - sizeof(seq)) {
+ return (-1);
+ }
+
+ if (tls_13) {
+ /*
+ * RFC 8446 5.3: left pad the 64b seqno
+ * with 0s, and xor with the IV
+ *
+	 * gcm_init does not provide a way to specify the
+ * length of the iv, so we have hard-coded it to 12 in
+ * openssl
+ */
+ memcpy(seq_num, &seq, sizeof(seq));
+
+ offset = iv_len - sizeof(seq);
+ memcpy(iv, static_iv, offset);
+ for (i = 0; i < sizeof(seq); i++)
+ iv[i + offset] = static_iv[i + offset] ^ seq_num[i];
+ } else {
+ memcpy(iv, static_iv, iv_len);
+ memcpy(iv + iv_len, &seq, sizeof(seq));
+ }
+ isa->gcm_init(&isa->key_data, &isa->ctx_data, iv, ad, (size_t)adlen);
+ for (i = 0; i < numiovs; i++) {
+ in = iniov[i].iov_base;
+ out = outiov[i].iov_base;
+ len = iniov[i].iov_len;
+
+ misaligned_start = ((uintptr_t)in & 0xf) != 0;
+ misaligned_len = (len & 0xf) != 0;
+
+ if (misaligned_start || misaligned_len) {
+ /*
+ * Try to do as much of a page using
+ * non-temporals as we possibly can, and leave
+ * a ragged tail as a separate chunk.
+ */
+ if (nt && !misaligned_start && len > 0xf) {
+ len = len & ~0xf;
+ fixup = iniov[i].iov_len - len;
+ } else {
+ nt = false;
+ }
+ }
+fixup_done:
+ if (nt) {
+ isa->gcm_upd_nt(&isa->key_data, &isa->ctx_data, out, in, len);
+ counter_u64_add(intelisa_aligned_mem, 1);
+ counter_u64_add(intelisa_aligned_mem_b, len);
+ } else {
+ isa->gcm_upd(&isa->key_data, &isa->ctx_data, out, in, len);
+ counter_u64_add(intelisa_unaligned_mem, 1);
+ counter_u64_add(intelisa_unaligned_mem_b, len);
+ }
+ if (fixup) {
+ in += len;
+ out += len;
+ len = fixup;
+ fixup = 0;
+ nt = false;
+ goto fixup_done;
+ }
+ }
+ if (tls_13) {
+ *tagout = tls_13_rtype;
+ isa->gcm_upd(&isa->key_data, &isa->ctx_data, tagout,
+ tagout, 1);
+ tagout += 1;
+ }
+ isa->gcm_final(&isa->key_data, &isa->ctx_data, tagout, *taglen);
+ return (0);
+}
+
+static int
+ktls_intelisa_aead_encrypt(struct ktls_session *tls,
+ const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
+ struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t tls_rtype)
+{
+ struct isa_gcm_struct *isa;
+ struct tls_aead_data ad;
+ struct tls_nonce_data nd;
+ size_t adlen, taglen;
+ uint8_t *adptr;
+ int ret;
+ uint16_t tls_comp_len;
+ bool tls_13;
+
+ isa = (struct isa_gcm_struct *)tls->cipher;
+
+ KASSERT(isa != NULL, ("Null cipher"));
+ counter_u64_add(ktls_offload_isa_aead, 1);
+ taglen = KTLS_INTELISA_AEAD_TAGLEN;
+
+ if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) {
+ tls_13 = true;
+ counter_u64_add(ktls_offload_isa_tls_13, 1);
+ adlen = sizeof(ad) - sizeof(ad.seq);
+ adptr = &ad.type;
+ ad.tls_length = hdr->tls_length;
+
+ } else {
+ tls_13 = false;
+ counter_u64_add(ktls_offload_isa_tls_12, 1);
+ tls_comp_len = ntohs(hdr->tls_length) -
+ (KTLS_INTELISA_AEAD_TAGLEN + sizeof(nd.seq));
+ adlen = sizeof(ad);
+ adptr = (uint8_t *)&ad;
+ ad.tls_length = htons(tls_comp_len);
+ }
+ /* Setup the associated data */
+ ad.seq = htobe64(seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+
+ ret = intel_isa_seal(isa, outiov, iovcnt,
+ tls->params.iv, tls->params.iv_len,
+ htobe64(seqno), iniov,
+ adptr, adlen, trailer, &taglen,
+ tls_13, tls_rtype);
+
+ return(ret);
+}
+
+
+static int
+ktls_intelisa_setup_cipher(struct isa_gcm_struct *isa, uint8_t *key)
+{
+ struct fpu_kern_ctx *fpu_ctx;
+
+ if (key == NULL) {
+ return (EINVAL);
+ }
+ fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT);
+ if (fpu_ctx == NULL) {
+ return (ENOMEM);
+ }
+ fpu_kern_enter(curthread, fpu_ctx, FPU_KERN_NORMAL);
+ isa->gcm_pre(key, &isa->key_data);
+ fpu_kern_leave(curthread, fpu_ctx);
+ fpu_kern_free_ctx(fpu_ctx);
+ return (0);
+}
+
+static void
+ktls_intelisa_free(struct ktls_session *tls)
+{
+ struct isa_gcm_struct *isa;
+
+ isa = tls->cipher;
+ explicit_bzero(isa, sizeof(*isa));
+ free(isa, M_INTEL_ISA);
+}
+
+static int
+ktls_intelisa_try(struct socket *so, struct ktls_session *tls)
+{
+ struct isa_gcm_struct *isa;
+ int error;
+
+ if (ktls_use_intel_isa_gcm &&
+ tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
+ isa = malloc(sizeof (*isa), M_INTEL_ISA, M_NOWAIT | M_ZERO);
+ if (isa == NULL) {
+ return (ENOMEM);
+ }
+ switch (tls->params.cipher_key_len) {
+ case 16:
+ isa->gcm_pre = aes_gcm_pre_128;
+ isa->gcm_init = aes_gcm_init_128;
+ isa->gcm_upd = aes_gcm_enc_128_update;
+ isa->gcm_upd_nt = aes_gcm_enc_128_update_nt;
+ isa->gcm_final = aes_gcm_enc_128_finalize;
+ break;
+ case 32:
+ isa->gcm_pre = aes_gcm_pre_256;
+ isa->gcm_init = aes_gcm_init_256;
+ isa->gcm_upd = aes_gcm_enc_256_update;
+ isa->gcm_upd_nt = aes_gcm_enc_256_update_nt;
+ isa->gcm_final = aes_gcm_enc_256_finalize;
+ break;
+ default:
+ free(isa, M_INTEL_ISA);
+ return (EOPNOTSUPP);
+ }
+
+ error = ktls_intelisa_setup_cipher(isa, tls->params.cipher_key);
+ if (error) {
+ free(isa, M_INTEL_ISA);
+ return (error);
+ }
+
+ tls->cipher = isa;
+ tls->sw_encrypt = ktls_intelisa_aead_encrypt;
+ tls->free = ktls_intelisa_free;
+ return (0);
+ }
+ return (EOPNOTSUPP);
+}
+
+struct ktls_crypto_backend intelisa_backend = {
+ .name = "Intel ISA-L",
+ .prio = 20,
+ .api_version = KTLS_API_VERSION,
+ .try = ktls_intelisa_try,
+};
+
+static int
+intelisa_init(void)
+{
+ ktls_offload_isa_aead = counter_u64_alloc(M_WAITOK);
+ ktls_offload_isa_tls_12 = counter_u64_alloc(M_WAITOK);
+ ktls_offload_isa_tls_13 = counter_u64_alloc(M_WAITOK);
+ intelisa_aligned_mem = counter_u64_alloc(M_WAITOK);
+ intelisa_aligned_mem_b = counter_u64_alloc(M_WAITOK);
+ intelisa_unaligned_mem = counter_u64_alloc(M_WAITOK);
+ intelisa_unaligned_mem_b = counter_u64_alloc(M_WAITOK);
+ return (ktls_crypto_backend_register(&intelisa_backend));
+}
+
+static int
+intelisa_unload(void)
+{
+ int error;
+
+ error = ktls_crypto_backend_deregister(&intelisa_backend);
+ if (error)
+ return (error);
+ counter_u64_free(ktls_offload_isa_aead);
+ counter_u64_free(intelisa_aligned_mem);
+ counter_u64_free(intelisa_aligned_mem_b);
+ counter_u64_free(intelisa_unaligned_mem);
+ counter_u64_free(intelisa_unaligned_mem_b);
+ return (0);
+}
+
+static int
+intelisa_module_event_handler(module_t mod, int evt, void *arg)
+{
+ switch (evt) {
+ case MOD_LOAD:
+ return (intelisa_init());
+ case MOD_UNLOAD:
+ return (intelisa_unload());
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
+static moduledata_t intelisa_moduledata = {
+ "intelisa",
+ intelisa_module_event_handler,
+ NULL
+};
+
+DECLARE_MODULE(intelisa, intelisa_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);
diff --git a/security/ktls_isa-l_crypto-kmod/pkg-descr b/security/ktls_isa-l_crypto-kmod/pkg-descr
new file mode 100644
index 000000000000..8a16c32695ee
--- /dev/null
+++ b/security/ktls_isa-l_crypto-kmod/pkg-descr
@@ -0,0 +1,8 @@
+ISA-L crypto is a collection of optimized low-level functions targeting
+cryptography applications.
+
+This port provides a software encryption module for in-kernel TLS
+(KTLS) that uses routines in the ISA-L crypto library to encrypt TLS
+records.
+
+WWW: https://github.com/01org/isa-l_crypto