riscv: Provide a vector-only implementation of the ChaCha20 cipher

Although we already have a Zvkb version of ChaCha20, the Zvkb extension
from the RISC-V Vector Cryptography Bit-manipulation specification was
only ratified in late 2023 and did not make it into the RVA23 Profile.
Many CPUs in 2024 do not support Zvkb but do have the Vector and
Bit-manipulation extensions, which are already in the RVA22 Profile.
This commit provides a vector-only implementation that replaces vror
with vsll+vsrl+vor and still provides enough ChaCha20 speed for this
year's new CPUs.
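
The substitution is exact: rotating a 32-bit word left by s is the same
as shifting it left by s OR'd with shifting it right by 32 - s (the Zvkb
path issues vror.vi by 32 - s, a right rotate, to get the same left
rotate). A minimal scalar C model of the two paths; the helper names
here are illustrative, not part of the patch:

    #include <stdint.h>

    /* What Zvkb's vror.vi gives per 32-bit element: a one-instruction
     * rotate. ChaCha20 only rotates by 16, 12, 8 and 7, so 0 < s < 32. */
    static inline uint32_t rotl32(uint32_t x, unsigned s)
    {
        return (x << s) | (x >> (32 - s));
    }

    /* The vector-only substitution, modeled one element at a time:
     * vsll.vi + vsrl.vi + vor.vv instead of a single vror.vi. */
    static inline uint32_t rotl32_no_zvkb(uint32_t x, unsigned s)
    {
        uint32_t hi = x << s;        /* vsll.vi */
        uint32_t lo = x >> (32 - s); /* vsrl.vi */
        return hi | lo;              /* vor.vv  */
    }

Three vector instructions per rotate instead of one is the entire cost
of dropping the Zvkb dependency.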

Signed-off-by: Yangyu Chen <cyy@cyyself.name>

Reviewed-by: Paul Dale <ppzgs1@gmail.com>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/24069)
Yangyu Chen 2024-04-19 11:49:59 +08:00 committed by Tomas Mraz
parent 7cbca5a6d6
commit 03ce37e117
4 changed files with 182 additions and 114 deletions

crypto/chacha/asm/chacha-riscv64-zbb-zvkb.pl → crypto/chacha/asm/chacha-riscv64-v-zbb.pl

@@ -37,9 +37,10 @@
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
# - RISC-V Basic Bit-manipulation extension ('Zbb')
# - RISC-V Zicclsm (main memory supports misaligned loads/stores)
# Optional:
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
use strict;
use warnings;
@@ -54,15 +55,18 @@ use riscv;
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
my $use_zvkb = $flavour && $flavour =~ /zvkb/i ? 1 : 0;
my $isaext = "_v_zbb" . ( $use_zvkb ? "_zvkb" : "" );
$output and open STDOUT, ">$output";
my $code = <<___;
.text
___
# void ChaCha20_ctr32_zbb_zvkb(unsigned char *out, const unsigned char *inp,
# size_t len, const unsigned int key[8],
# const unsigned int counter[4]);
# void ChaCha20_ctr32@{[$isaext]}(unsigned char *out, const unsigned char *inp,
# size_t len, const unsigned int key[8],
# const unsigned int counter[4]);
################################################################################
my ( $OUTPUT, $INPUT, $LEN, $KEY, $COUNTER ) = ( "a0", "a1", "a2", "a3", "a4" );
my ( $CONST_DATA0, $CONST_DATA1, $CONST_DATA2, $CONST_DATA3 ) = ( "a5", "a6",
@@ -90,6 +94,92 @@ my (
$V22, $V23, $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
) = map( "v$_", ( 0 .. 31 ) );
sub chacha_sub_round {
my (
$A0, $B0, $C0,
$A1, $B1, $C1,
$A2, $B2, $C2,
$A3, $B3, $C3,
$S_A0, $S_B0, $S_C0,
$S_A1, $S_B1, $S_C1,
$S_A2, $S_B2, $S_C2,
$S_A3, $S_B3, $S_C3,
$ROL_SHIFT,
$V_T0, $V_T1, $V_T2, $V_T3,
) = @_;
# a += b; c ^= a; c <<<= $ROL_SHIFT;
if ($use_zvkb) {
my $code = <<___;
@{[vadd_vv $A0, $A0, $B0]}
add $S_A0, $S_A0, $S_B0
@{[vadd_vv $A1, $A1, $B1]}
add $S_A1, $S_A1, $S_B1
@{[vadd_vv $A2, $A2, $B2]}
add $S_A2, $S_A2, $S_B2
@{[vadd_vv $A3, $A3, $B3]}
add $S_A3, $S_A3, $S_B3
@{[vxor_vv $C0, $C0, $A0]}
xor $S_C0, $S_C0, $S_A0
@{[vxor_vv $C1, $C1, $A1]}
xor $S_C1, $S_C1, $S_A1
@{[vxor_vv $C2, $C2, $A2]}
xor $S_C2, $S_C2, $S_A2
@{[vxor_vv $C3, $C3, $A3]}
xor $S_C3, $S_C3, $S_A3
@{[vror_vi $C0, $C0, 32 - $ROL_SHIFT]}
@{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
@{[vror_vi $C1, $C1, 32 - $ROL_SHIFT]}
@{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
@{[vror_vi $C2, $C2, 32 - $ROL_SHIFT]}
@{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
@{[vror_vi $C3, $C3, 32 - $ROL_SHIFT]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
return $code;
} else {
my $code = <<___;
@{[vadd_vv $A0, $A0, $B0]}
add $S_A0, $S_A0, $S_B0
@{[vadd_vv $A1, $A1, $B1]}
add $S_A1, $S_A1, $S_B1
@{[vadd_vv $A2, $A2, $B2]}
add $S_A2, $S_A2, $S_B2
@{[vadd_vv $A3, $A3, $B3]}
add $S_A3, $S_A3, $S_B3
@{[vxor_vv $C0, $C0, $A0]}
xor $S_C0, $S_C0, $S_A0
@{[vxor_vv $C1, $C1, $A1]}
xor $S_C1, $S_C1, $S_A1
@{[vxor_vv $C2, $C2, $A2]}
xor $S_C2, $S_C2, $S_A2
@{[vxor_vv $C3, $C3, $A3]}
xor $S_C3, $S_C3, $S_A3
@{[vsll_vi $V_T0, $C0, $ROL_SHIFT]}
@{[vsll_vi $V_T1, $C1, $ROL_SHIFT]}
@{[vsll_vi $V_T2, $C2, $ROL_SHIFT]}
@{[vsll_vi $V_T3, $C3, $ROL_SHIFT]}
@{[vsrl_vi $C0, $C0, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C1, $C1, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C2, $C2, 32 - $ROL_SHIFT]}
@{[vsrl_vi $C3, $C3, 32 - $ROL_SHIFT]}
@{[vor_vv $C0, $C0, $V_T0]}
@{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
@{[vor_vv $C1, $C1, $V_T1]}
@{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
@{[vor_vv $C2, $C2, $V_T2]}
@{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
@{[vor_vv $C3, $C3, $V_T3]}
@{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
return $code;
}
}
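
chacha_sub_round emits the "a += b; c ^= a; c <<<= s" step for four
groups at once, pairing each vector instruction with a scalar Zbb
instruction so the extra block kept in GPRs advances in lockstep with
the blocks held in the vector lanes. A single-block C model of the step
(the function name is illustrative, assuming the rotate sketch above):

    /* One ChaCha sub-round on a plain 16-word state; a, b, c index
     * state words and s is the rotate amount (16, 12, 8 or 7). */
    static inline void sub_round(uint32_t x[16], int a, int b, int c,
                                 unsigned s)
    {
        x[a] += x[b];                            /* vadd.vv / add */
        x[c] ^= x[a];                            /* vxor.vv / xor */
        x[c] = (x[c] << s) | (x[c] >> (32 - s)); /* vror.vi, or
                                                    vsll+vsrl+vor */
    }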
sub chacha_quad_round_group {
my (
$A0, $B0, $C0, $D0,
@@ -101,109 +191,59 @@ sub chacha_quad_round_group {
$S_A1, $S_B1, $S_C1, $S_D1,
$S_A2, $S_B2, $S_C2, $S_D2,
$S_A3, $S_B3, $S_C3, $S_D3,
$V_T0, $V_T1, $V_T2, $V_T3,
) = @_;
my $code = <<___;
# a += b; d ^= a; d <<<= 16;
@{[vadd_vv $A0, $A0, $B0]}
add $S_A0, $S_A0, $S_B0
@{[vadd_vv $A1, $A1, $B1]}
add $S_A1, $S_A1, $S_B1
@{[vadd_vv $A2, $A2, $B2]}
add $S_A2, $S_A2, $S_B2
@{[vadd_vv $A3, $A3, $B3]}
add $S_A3, $S_A3, $S_B3
@{[vxor_vv $D0, $D0, $A0]}
xor $S_D0, $S_D0, $S_A0
@{[vxor_vv $D1, $D1, $A1]}
xor $S_D1, $S_D1, $S_A1
@{[vxor_vv $D2, $D2, $A2]}
xor $S_D2, $S_D2, $S_A2
@{[vxor_vv $D3, $D3, $A3]}
xor $S_D3, $S_D3, $S_A3
@{[vror_vi $D0, $D0, 32 - 16]}
@{[roriw $S_D0, $S_D0, 32 - 16]}
@{[vror_vi $D1, $D1, 32 - 16]}
@{[roriw $S_D1, $S_D1, 32 - 16]}
@{[vror_vi $D2, $D2, 32 - 16]}
@{[roriw $S_D2, $S_D2, 32 - 16]}
@{[vror_vi $D3, $D3, 32 - 16]}
@{[roriw $S_D3, $S_D3, 32 - 16]}
@{[chacha_sub_round
$A0, $B0, $D0,
$A1, $B1, $D1,
$A2, $B2, $D2,
$A3, $B3, $D3,
$S_A0, $S_B0, $S_D0,
$S_A1, $S_B1, $S_D1,
$S_A2, $S_B2, $S_D2,
$S_A3, $S_B3, $S_D3,
16,
$V_T0, $V_T1, $V_T2, $V_T3]}
# c += d; b ^= c; b <<<= 12;
@{[vadd_vv $C0, $C0, $D0]}
add $S_C0, $S_C0, $S_D0
@{[vadd_vv $C1, $C1, $D1]}
add $S_C1, $S_C1, $S_D1
@{[vadd_vv $C2, $C2, $D2]}
add $S_C2, $S_C2, $S_D2
@{[vadd_vv $C3, $C3, $D3]}
add $S_C3, $S_C3, $S_D3
@{[vxor_vv $B0, $B0, $C0]}
xor $S_B0, $S_B0, $S_C0
@{[vxor_vv $B1, $B1, $C1]}
xor $S_B1, $S_B1, $S_C1
@{[vxor_vv $B2, $B2, $C2]}
xor $S_B2, $S_B2, $S_C2
@{[vxor_vv $B3, $B3, $C3]}
xor $S_B3, $S_B3, $S_C3
@{[vror_vi $B0, $B0, 32 - 12]}
@{[roriw $S_B0, $S_B0, 32 - 12]}
@{[vror_vi $B1, $B1, 32 - 12]}
@{[roriw $S_B1, $S_B1, 32 - 12]}
@{[vror_vi $B2, $B2, 32 - 12]}
@{[roriw $S_B2, $S_B2, 32 - 12]}
@{[vror_vi $B3, $B3, 32 - 12]}
@{[roriw $S_B3, $S_B3, 32 - 12]}
@{[chacha_sub_round
$C0, $D0, $B0,
$C1, $D1, $B1,
$C2, $D2, $B2,
$C3, $D3, $B3,
$S_C0, $S_D0, $S_B0,
$S_C1, $S_D1, $S_B1,
$S_C2, $S_D2, $S_B2,
$S_C3, $S_D3, $S_B3,
12,
$V_T0, $V_T1, $V_T2, $V_T3]}
# a += b; d ^= a; d <<<= 8;
@{[vadd_vv $A0, $A0, $B0]}
add $S_A0, $S_A0, $S_B0
@{[vadd_vv $A1, $A1, $B1]}
add $S_A1, $S_A1, $S_B1
@{[vadd_vv $A2, $A2, $B2]}
add $S_A2, $S_A2, $S_B2
@{[vadd_vv $A3, $A3, $B3]}
add $S_A3, $S_A3, $S_B3
@{[vxor_vv $D0, $D0, $A0]}
xor $S_D0, $S_D0, $S_A0
@{[vxor_vv $D1, $D1, $A1]}
xor $S_D1, $S_D1, $S_A1
@{[vxor_vv $D2, $D2, $A2]}
xor $S_D2, $S_D2, $S_A2
@{[vxor_vv $D3, $D3, $A3]}
xor $S_D3, $S_D3, $S_A3
@{[vror_vi $D0, $D0, 32 - 8]}
@{[roriw $S_D0, $S_D0, 32 - 8]}
@{[vror_vi $D1, $D1, 32 - 8]}
@{[roriw $S_D1, $S_D1, 32 - 8]}
@{[vror_vi $D2, $D2, 32 - 8]}
@{[roriw $S_D2, $S_D2, 32 - 8]}
@{[vror_vi $D3, $D3, 32 - 8]}
@{[roriw $S_D3, $S_D3, 32 - 8]}
@{[chacha_sub_round
$A0, $B0, $D0,
$A1, $B1, $D1,
$A2, $B2, $D2,
$A3, $B3, $D3,
$S_A0, $S_B0, $S_D0,
$S_A1, $S_B1, $S_D1,
$S_A2, $S_B2, $S_D2,
$S_A3, $S_B3, $S_D3,
8,
$V_T0, $V_T1, $V_T2, $V_T3]}
# c += d; b ^= c; b <<<= 7;
@{[vadd_vv $C0, $C0, $D0]}
add $S_C0, $S_C0, $S_D0
@{[vadd_vv $C1, $C1, $D1]}
add $S_C1, $S_C1, $S_D1
@{[vadd_vv $C2, $C2, $D2]}
add $S_C2, $S_C2, $S_D2
@{[vadd_vv $C3, $C3, $D3]}
add $S_C3, $S_C3, $S_D3
@{[vxor_vv $B0, $B0, $C0]}
xor $S_B0, $S_B0, $S_C0
@{[vxor_vv $B1, $B1, $C1]}
xor $S_B1, $S_B1, $S_C1
@{[vxor_vv $B2, $B2, $C2]}
xor $S_B2, $S_B2, $S_C2
@{[vxor_vv $B3, $B3, $C3]}
xor $S_B3, $S_B3, $S_C3
@{[vror_vi $B0, $B0, 32 - 7]}
@{[roriw $S_B0, $S_B0, 32 - 7]}
@{[vror_vi $B1, $B1, 32 - 7]}
@{[roriw $S_B1, $S_B1, 32 - 7]}
@{[vror_vi $B2, $B2, 32 - 7]}
@{[roriw $S_B2, $S_B2, 32 - 7]}
@{[vror_vi $B3, $B3, 32 - 7]}
@{[roriw $S_B3, $S_B3, 32 - 7]}
@{[chacha_sub_round
$C0, $D0, $B0,
$C1, $D1, $B1,
$C2, $D2, $B2,
$C3, $D3, $B3,
$S_C0, $S_D0, $S_B0,
$S_C1, $S_D1, $S_B1,
$S_C2, $S_D2, $S_B2,
$S_C3, $S_D3, $S_B3,
7,
$V_T0, $V_T1, $V_T2, $V_T3]}
___
return $code;
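
Chaining four such sub-rounds with rotates 16, 12, 8 and 7 is exactly
one ChaCha quarter round, and chacha_quad_round_group applies it to four
groups in parallel; the two call sites in the round loop below feed it
the column and then the diagonal index patterns. In single-block C
terms, assuming the sub_round model sketched earlier:

    /* ChaCha quarter round: the four sub-rounds in sequence. */
    static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
    {
        sub_round(x, a, b, d, 16);
        sub_round(x, c, d, b, 12);
        sub_round(x, a, b, d, 8);
        sub_round(x, c, d, b, 7);
    }

    /* One double round: column groups, then diagonal groups (cf. the
     * two chacha_quad_round_group calls in the generated loop). */
    static void double_round(uint32_t x[16])
    {
        quarter_round(x, 0, 4,  8, 12);
        quarter_round(x, 1, 5,  9, 13);
        quarter_round(x, 2, 6, 10, 14);
        quarter_round(x, 3, 7, 11, 15);
        quarter_round(x, 0, 5, 10, 15);
        quarter_round(x, 1, 6, 11, 12);
        quarter_round(x, 2, 7,  8, 13);
        quarter_round(x, 3, 4,  9, 14);
    }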
@@ -211,9 +251,9 @@ ___
$code .= <<___;
.p2align 3
.globl ChaCha20_ctr32_zbb_zvkb
.type ChaCha20_ctr32_zbb_zvkb,\@function
ChaCha20_ctr32_zbb_zvkb:
.globl ChaCha20_ctr32@{[$isaext]}
.type ChaCha20_ctr32@{[$isaext]},\@function
ChaCha20_ctr32@{[$isaext]}:
addi sp, sp, -96
sd s0, 0(sp)
sd s1, 8(sp)
@@ -305,7 +345,8 @@ ChaCha20_ctr32_zbb_zvkb:
$STATE0, $STATE4, $STATE8, $STATE12,
$STATE1, $STATE5, $STATE9, $STATE13,
$STATE2, $STATE6, $STATE10, $STATE14,
$STATE3, $STATE7, $STATE11, $STATE15]}
$STATE3, $STATE7, $STATE11, $STATE15,
$V24, $V25, $V26, $V27]}
@{[chacha_quad_round_group
$V3, $V4, $V9, $V14,
$V0, $V5, $V10, $V15,
@@ -314,7 +355,8 @@ ChaCha20_ctr32_zbb_zvkb:
$STATE3, $STATE4, $STATE9, $STATE14,
$STATE0, $STATE5, $STATE10, $STATE15,
$STATE1, $STATE6, $STATE11, $STATE12,
$STATE2, $STATE7, $STATE8, $STATE13]}
$STATE2, $STATE7, $STATE8, $STATE13,
$V24, $V25, $V26, $V27]}
bnez $T0, .Lround_loop
li $T0, 64
@@ -468,7 +510,7 @@ ChaCha20_ctr32_zbb_zvkb:
addi sp, sp, 96
ret
.size ChaCha20_ctr32_zbb_zvkb,.-ChaCha20_ctr32_zbb_zvkb
.size ChaCha20_ctr32@{[$isaext]},.-ChaCha20_ctr32@{[$isaext]}
___
print $code;

crypto/chacha/build.info

@@ -22,7 +22,7 @@ IF[{- !$disabled{asm} -}]
$CHACHAASM_c64xplus=chacha-c64xplus.s
$CHACHAASM_riscv64=chacha_riscv.c chacha_enc.c chacha-riscv64-zbb-zvkb.s
$CHACHAASM_riscv64=chacha_riscv.c chacha_enc.c chacha-riscv64-v-zbb.s chacha-riscv64-v-zbb-zvkb.s
$CHACHADEF_riscv64=INCLUDE_C_CHACHA20
# Now that we have defined all the arch specific variables, use the
@@ -53,4 +53,5 @@ GENERATE[chacha-s390x.S]=asm/chacha-s390x.pl
GENERATE[chacha-ia64.S]=asm/chacha-ia64.pl
GENERATE[chacha-ia64.s]=chacha-ia64.S
GENERATE[chacha-loongarch64.S]=asm/chacha-loongarch64.pl
GENERATE[chacha-riscv64-zbb-zvkb.s]=asm/chacha-riscv64-zbb-zvkb.pl
GENERATE[chacha-riscv64-v-zbb.s]=asm/chacha-riscv64-v-zbb.pl
GENERATE[chacha-riscv64-v-zbb-zvkb.s]=asm/chacha-riscv64-v-zbb.pl zvkb

crypto/chacha/chacha_riscv.c

@@ -40,16 +40,23 @@
#include "crypto/chacha.h"
#include "crypto/riscv_arch.h"
void ChaCha20_ctr32_zbb_zvkb(unsigned char *out, const unsigned char *inp,
size_t len, const unsigned int key[8],
const unsigned int counter[4]);
void ChaCha20_ctr32_v_zbb_zvkb(unsigned char *out, const unsigned char *inp,
size_t len, const unsigned int key[8],
const unsigned int counter[4]);
void ChaCha20_ctr32_v_zbb(unsigned char *out, const unsigned char *inp,
size_t len, const unsigned int key[8],
const unsigned int counter[4]);
void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp, size_t len,
const unsigned int key[8], const unsigned int counter[4])
{
if (len > CHACHA_BLK_SIZE && RISCV_HAS_ZVKB() && RISCV_HAS_ZBB() &&
riscv_vlen() >= 128) {
ChaCha20_ctr32_zbb_zvkb(out, inp, len, key, counter);
if (len > CHACHA_BLK_SIZE && RISCV_HAS_ZBB() && riscv_vlen() >= 128) {
if (RISCV_HAS_ZVKB()) {
ChaCha20_ctr32_v_zbb_zvkb(out, inp, len, key, counter);
} else {
ChaCha20_ctr32_v_zbb(out, inp, len, key, counter);
}
} else {
ChaCha20_ctr32_c(out, inp, len, key, counter);
}
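
One way to sanity-check the dispatcher is to compare an accelerated path
against the generic C route on the same input. A hedged sketch: the key,
counter and length are arbitrary test values, not official vectors, and
the check is only meaningful on hardware that actually has V and Zbb
(both functions are declared above):

    #include <string.h>

    static int vector_path_matches_c(void)
    {
        unsigned char in[256] = {0}, out_v[256], out_c[256];
        unsigned int key[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        unsigned int counter[4] = {0, 0, 0, 0};

        ChaCha20_ctr32_v_zbb(out_v, in, sizeof(in), key, counter);
        ChaCha20_ctr32_c(out_c, in, sizeof(in), key, counter);
        return memcmp(out_v, out_c, sizeof(in)) == 0;
    }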

crypto/perlasm/riscv.pm

@@ -598,6 +598,15 @@ sub vmv_v_v {
return ".word ".($template | ($vs1 << 15) | ($vd << 7));
}
sub vor_vv {
# vor.vv vd, vs2, vs1
my $template = 0b0010101_00000_00000_000_00000_1010111;
my $vd = read_vreg shift;
my $vs2 = read_vreg shift;
my $vs1 = read_vreg shift;
return ".word ".($template | ($vs2 << 20) | ($vs1 << 15) | ($vd << 7));
}
sub vor_vv_v0t {
# vor.vv vd, vs2, vs1, v0.t
my $template = 0b0010100_00000_00000_000_00000_1010111;
@@ -747,6 +756,15 @@ sub vsll_vi {
return ".word ".($template | ($vs2 << 20) | ($uimm << 15) | ($vd << 7));
}
sub vsrl_vi {
# vsrl.vi vd, vs2, uimm, vm
my $template = 0b1010001_00000_00000_011_00000_1010111;
my $vd = read_vreg shift;
my $vs2 = read_vreg shift;
my $uimm = shift;
return ".word ".($template | ($vs2 << 20) | ($uimm << 15) | ($vd << 7));
}
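
These helpers emit raw .word values so the generated assembly does not
depend on the assembler understanding vector mnemonics: each instruction
is a fixed opcode template OR'd with the register and immediate fields.
A standalone C rendering of the vsrl.vi packing, with arbitrarily chosen
register numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* Template from the Perl helper above: funct6=101000, vm=1,
     * funct3=OPIVI (011), opcode=OP-V (1010111), i.e.
     * 0b1010001_00000_00000_011_00000_1010111 == 0xA2003057. */
    static uint32_t vsrl_vi_word(uint32_t vd, uint32_t vs2, uint32_t uimm)
    {
        const uint32_t tmpl = 0xA2003057u;
        return tmpl | (vs2 << 20) | (uimm << 15) | (vd << 7);
    }

    int main(void)
    {
        /* e.g. vsrl.vi v24, v8, 25 -- 25 being 32 - 7, as in the
         * rotate-by-7 sub-round -- emitted into the .s file verbatim */
        printf(".word 0x%08x\n", vsrl_vi_word(24, 8, 25));
        return 0;
    }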
sub vsrl_vx {
# vsrl.vx vd, vs2, rs1
my $template = 0b1010001_00000_00000_100_00000_1010111;